linux/drivers/staging/vme/bridges/vme_ca91cx42.c
/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
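
/*
 * Illustrative sketch, not part of the driver: lm_callback entries are
 * invoked from ca91cx42_LM_irqhandler() in hard interrupt context, so a
 * handler must not sleep, take mutexes or touch user space. The handler
 * name below is hypothetical.
 */
#if 0
static void example_lm_handler(int monitor)
{
	/* Only atomic-context-safe work here */
	printk(KERN_INFO "location monitor %d fired\n", monitor);
}
/* A user would then simply assign: lm_callback[0] = example_lm_handler; */
#endif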
void *crcsr_kernel;
dma_addr_t crcsr_bus;

struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */
struct mutex vme_irq;	/* Locking for VME irq callback configuration */

static char driver_name[] = "vme_ca91cx42";

static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}
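
/*
 * One possible shape for the split the XXX above asks for; a sketch only,
 * not wired into the driver. Each mailbox gets its own wait queue (each
 * would need init_waitqueue_head() during probe), so a waiter on mailbox 0
 * is not woken by traffic on mailbox 3. The names below are hypothetical.
 */
#if 0
static wait_queue_head_t mbox_queues[4];

static u32 ca91cx42_MB_irqhandler_split(int mbox_mask)
{
	static const u32 mbox_bit[4] = { CA91CX42_LINT_MBOX0,
		CA91CX42_LINT_MBOX1, CA91CX42_LINT_MBOX2,
		CA91CX42_LINT_MBOX3 };
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (mbox_mask & mbox_bit[i]) {
			wake_up(&mbox_queues[i]);
			serviced |= mbox_bit[i];
		}
	}

	return serviced;
}
#endif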

static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}

#if 0
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;
	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
			       ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}
#endif

static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
	int vec, i, serviced = 0;
	void (*call)(int, int, void *);
	void *priv_data;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(ca91cx42_bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			call = ca91cx42_bridge->irq[i - 1].callback[vec].func;
			priv_data =
				ca91cx42_bridge->irq[i - 1].callback[vec].priv_data;

			if (call != NULL)
				call(i, vec, priv_data);
			else
				printk(KERN_ERR "Spurious VME interrupt, "
					"level:%x, vector:%x\n", i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}

static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	/* The IRQ is shared: dev_id must match what request_irq() passed */
	if (dev_id != ca91cx42_bridge)
		return IRQ_NONE;

	enable = ioread32(ca91cx42_bridge->base + LINT_EN);
	stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler();
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler();
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler();
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler();
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}

static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* Pass the bridge as dev_id; the handler checks for this value */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}

static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	/* dev_id must match the request_irq() in ca91cx42_irq_init() */
	free_irq(pdev->irq, ca91cx42_bridge);
}

/*
 * Set up a VME interrupt
 */
int ca91cx42_request_irq(int level, int statid,
	void (*callback)(int level, int vector, void *priv_data),
	void *priv_data)
{
	u32 tmp;

	mutex_lock(&(vme_irq));

	if (ca91cx42_bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&(vme_irq));
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	ca91cx42_bridge->irq[level - 1].count++;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	ca91cx42_bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_VIRQ[level];
	iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

	mutex_unlock(&(vme_irq));

	return 0;
}
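
/*
 * Hypothetical usage (sketch only): a board driver claiming VME level 3,
 * vector 0xaa. The callback runs from ca91cx42_VIRQ_irqhandler() in
 * interrupt context, so it must not sleep. All example_* names are
 * invented for illustration.
 */
#if 0
static void example_vme_isr(int level, int statid, void *priv_data)
{
	printk(KERN_INFO "VME irq: level %d, vector 0x%x\n", level, statid);
}

static int example_board_attach(void *board)
{
	/* Fails with -EBUSY if level 3/vector 0xaa is already claimed */
	return ca91cx42_request_irq(3, 0xaa, example_vme_isr, board);
}
#endif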

/*
 * Free VME interrupt
 */
void ca91cx42_free_irq(int level, int statid)
{
	u32 tmp;
	struct pci_dev *pdev;

	mutex_lock(&(vme_irq));

	ca91cx42_bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (ca91cx42_bridge->irq[level - 1].count == 0) {
		tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
		tmp &= ~CA91CX42_LINT_VIRQ[level];
		iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}

	ca91cx42_bridge->irq[level - 1].callback[statid].func = NULL;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&(vme_irq));
}

int ca91cx42_generate_irq(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/* Wait for IACK. XXX The wait condition is always false, so the
	 * wake_up() in ca91cx42_IACK_irqhandler() cannot end this wait by
	 * itself; only a signal does. A flag set by the IACK handler (or
	 * a completion) would make this reliable.
	 */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
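
/*
 * Hypothetical caller (sketch only): raising a level 5 interrupt with an
 * even Status/ID, as the even-vector check above requires.
 */
#if 0
static int example_raise_irq(void)
{
	/* 0xa0 is even; an odd Status/ID would return -EINVAL */
	return ca91cx42_generate_irq(5, 0xa0);
}
#endif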

int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}

	/* Image granularity: windows 0 and 4 have 4 KB resolution, the
	 * others 64 KB. This must be known before the bound address is
	 * computed below.
	 */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	pci_offset = pci_base - vme_base;

	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
	 * too big for registers
	 */

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}
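
	/*
	 * Worked example (illustrative numbers only): window 1 has 64 KB
	 * granularity, so vme_base = 0x100000, size = 0x20000 and
	 * pci_base = 0x80100000 give vme_bound = 0x110000 and
	 * pci_offset = 0x80000000, all of which pass the checks above.
	 */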

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeIn->wrPostEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PWEN;
	if (vmeIn->prefetchEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PREN;
	if (vmeIn->rmwLock)
		temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
	if (vmeIn->data64BitCapable)
		temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}

int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->pci_resource.end -
		image->pci_resource.start);

	/* If the existing size is OK, return (resource.end is inclusive,
	 * so a window of 'size' bytes spans size - 1) */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		if (image->pci_resource.name != NULL)
			kfree(image->pci_resource.name);
		release_resource(&(image->pci_resource));
		memset(&(image->pci_resource), 0, sizeof(struct resource));
	}

	if (image->pci_resource.name == NULL) {
		image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->pci_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->pci_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->pci_resource.start = 0;
	image->pci_resource.end = (unsigned long)size;
	image->pci_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->pci_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->pci_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	iounmap(image->kern_base);
	image->kern_base = NULL;
err_remap:
	release_resource(&(image->pci_resource));
err_resource:
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}

int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;

	/* Verify input data */
	if (vme_base & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/* XXX We should do this much later, so that we can exit without
	 *     needing to redo the mapping...
	 */
	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate PCI resource for "
			"window %d\n", image->number);
		goto err_res;
	}

	pci_base = (unsigned long long)image->pci_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + (size - 0x1000);
	vme_offset = vme_base - pci_base;

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeOut->wrPostEnable)
		temp_ctl |= 0x40000000;
#endif

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;

	i = image->number;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (pci_bound - pci_base) + 0x1000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

/* XXX Prefetch stuff currently unsupported */
#if 0
	if (ctl & 0x40000000)
		vmeOut->wrPostEnable = 1;
#endif

	return 0;
}

int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
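
/*
 * Neither transfer helper above validates 'count' or 'offset' against the
 * window, so a bad caller can run past kern_base. A defensive variant
 * might look like this sketch (the function name and the window_size
 * parameter are invented; not wired into the driver):
 */
#if 0
ssize_t ca91cx42_master_read_checked(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset, size_t window_size)
{
	/* Reject transfers that would run past the mapped window */
	if (offset < 0 || (size_t)offset + count > window_size)
		return -EFAULT;

	return ca91cx42_master_read(image, buf, count, offset);
}
#endif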

int ca91cx42_slot_get(void)
{
	u32 slot = 0;

	slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
	slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
	return (int)slot;
}

static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;

/* XXX We may need to set this somehow as the Universe II does not support
 *     geographical addressing.
 */
#if 0
	if (vme_slotnum != -1)
		iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
#endif
	slot = ca91cx42_slot_get();
	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&crcsr_bus);
	if (crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

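	/*
	 * Each slot owns a 512 kB block of the 16 MB CR/CSR space, so this
	 * board's registers live at slot * 0x80000. VCSR_TO is programmed
	 * with the difference between the buffer's PCI bus address and
	 * that block, so incoming CR/CSR accesses land in the buffer
	 * allocated above.
	 */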
	crcsr_addr = slot * (512 * 1024);
	iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
	u32 tmp;

	/* Turn off CR/CSR space */
	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
#if 0
	struct vme_dma_resource *dma_ctrlr;
#endif
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_bridge->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	/* XXX These need to be moved to the vme_bridge structure */
	init_waitqueue_head(&dma_queue);
	init_waitqueue_head(&iack_queue);
	mutex_init(&(vme_int));
	mutex_init(&(vme_irq));
	mutex_init(&(vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->pci_resource), 0,
			sizeof(struct resource));
		master_image->kern_base  = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}
#if 0
	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}
#endif
	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
#if 0
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
#endif
	ca91cx42_bridge->request_irq = ca91cx42_request_irq;
	ca91cx42_bridge->free_irq = ca91cx42_free_irq;
	ca91cx42_bridge->generate_irq = ca91cx42_generate_irq;
#if 0
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
#endif
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_bridge->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

	if (ca91cx42_crcsr_init(pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		retval = -EINVAL;
#if 0
		goto err_crcsr;
#endif
	}

	/* Register the bridge with the VME core; the core keeps the
	 * ca91cx42_bridge pointer on a link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	return 0;

	vme_unregister_bridge(ca91cx42_bridge);
err_reg:
	ca91cx42_crcsr_exit(pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
#if 0
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
#endif
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);
err_irq:
err_test:
	iounmap(ca91cx42_bridge->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
#if 0
	struct vme_dma_resource *dma_ctrlr;
#endif
	struct vme_lm_resource *lm;

	/* Turn off Ints */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);
#if 0
	ca91cx42_crcsr_exit(pdev);
#endif
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

#if 0
	/* DMA engines are only registered under #if 0 in ca91cx42_probe(),
	 * so their list head is never initialised; match that here to
	 * avoid walking an uninitialised list
	 */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
#endif

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);

	iounmap(ca91cx42_bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}

static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);

/*----------------------------------------------------------------------------
 * STAGING
 *--------------------------------------------------------------------------*/

1360#if 0
1361#define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >>  8) | ((X & 0x0000FF00) <<  8) | ((X & 0x000000FF) << 24))
1362
1363int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
1364{
1365        int temp_ctl = 0;
1366        int tempBS = 0;
1367        int tempBD = 0;
1368        int tempTO = 0;
1369        int vmeBS = 0;
1370        int vmeBD = 0;
1371        int *rmw_pci_data_ptr = NULL;
1372        int *vaDataPtr = NULL;
1373        int i;
1374        vmeOutWindowCfg_t vmeOut;
1375        if (vmeRmw->maxAttempts < 1) {
1376                return -EINVAL;
1377        }
1378        if (vmeRmw->targetAddrU) {
1379                return -EINVAL;
1380        }
1381        /* Find the PCI address that maps to the desired VME address */
1382        for (i = 0; i < 8; i++) {
1383                temp_ctl = ioread32(ca91cx42_bridge->base +
1384                        CA91CX42_LSI_CTL[i]);
1385                if ((temp_ctl & 0x80000000) == 0) {
1386                        continue;
1387                }
1388                memset(&vmeOut, 0, sizeof(vmeOut));
1389                vmeOut.windowNbr = i;
1390                ca91cx42_get_out_bound(&vmeOut);
1391                if (vmeOut.addrSpace != vmeRmw->addrSpace) {
1392                        continue;
1393                }
1394                tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
1395                tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
1396                tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
1397                vmeBS = tempBS + tempTO;
1398                vmeBD = tempBD + tempTO;
1399                if ((vmeRmw->targetAddr >= vmeBS) &&
1400                    (vmeRmw->targetAddr < vmeBD)) {
1401                        rmw_pci_data_ptr =
1402                            (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
1403                        vaDataPtr =
1404                            (int *)(out_image_va[i] +
1405                                    (vmeRmw->targetAddr - vmeBS));
1406                        break;
1407                }
1408        }
1409
1410        /* If no window - fail. */
1411        if (rmw_pci_data_ptr == NULL) {
1412                return -EINVAL;
1413        }
1414        /* Setup the RMW registers. */
1415        iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
1416        iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
1417        iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
1418                SCYC_CMP);
1419        iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
1420        iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
1421        iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);
1422
1423        /* Run the RMW cycle until either success or max attempts. */
1424        vmeRmw->numAttempts = 1;
1425        while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {
1426
1427                if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
1428                    (vmeRmw->swapData & vmeRmw->enableMask)) {
1429
1430                        iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
1431                        break;
1432
1433                }
1434                vmeRmw->numAttempts++;
1435        }
1436
1437        /* If no success, set num Attempts to be greater than max attempts */
1438        if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
1439                vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
1440        }
1441
1442        return 0;
1443}
1444
1445int uniSetupDctlReg(vmeDmaPacket_t * vmeDma, int *dctlregreturn)
1446{
1447        unsigned int dctlreg = 0x80;
1448        struct vmeAttr *vmeAttr;
1449
1450        if (vmeDma->srcBus == VME_DMA_VME) {
1451                dctlreg = 0;
1452                vmeAttr = &vmeDma->srcVmeAttr;
1453        } else {
1454                dctlreg = 0x80000000;
1455                vmeAttr = &vmeDma->dstVmeAttr;
1456        }
1457
1458        switch (vmeAttr->maxDataWidth) {
1459        case VME_D8:
1460                break;
1461        case VME_D16:
1462                dctlreg |= 0x00400000;
1463                break;
1464        case VME_D32:
1465                dctlreg |= 0x00800000;
1466                break;
1467        case VME_D64:
1468                dctlreg |= 0x00C00000;
1469                break;
1470        }
1471
1472        switch (vmeAttr->addrSpace) {
1473        case VME_A16:
1474                break;
1475        case VME_A24:
1476                dctlreg |= 0x00010000;
1477                break;
1478        case VME_A32:
1479                dctlreg |= 0x00020000;
1480                break;
1481        case VME_USER1:
1482                dctlreg |= 0x00060000;
1483                break;
1484        case VME_USER2:
1485                dctlreg |= 0x00070000;
1486                break;
1487
1488        case VME_A64:           /* not supported in Universe DMA */
1489        case VME_CRCSR:
1490        case VME_USER3:
1491        case VME_USER4:
1492                return -EINVAL;
1493                break;
1494        }
1495        if (vmeAttr->userAccessType == VME_PROG) {
1496                dctlreg |= 0x00004000;
1497        }
1498        if (vmeAttr->dataAccessType == VME_SUPER) {
1499                dctlreg |= 0x00001000;
1500        }
1501        if (vmeAttr->xferProtocol != VME_SCT) {
1502                dctlreg |= 0x00000100;
1503        }
1504        *dctlregreturn = dctlreg;
1505        return 0;
1506}
1507
1508unsigned int
1509ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
1510{
1511        unsigned int val;
1512
1513        /* Setup registers as needed for direct or chained. */
1514        if (dgcsreg & 0x8000000) {
1515                iowrite32(0, ca91cx42_bridge->base + DTBC);
1516                iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
1517        } else {
1518#if     0
1519                printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
1520                printk(KERN_ERR "Starting: DVA  = %08x\n",
1521                        ioread32(&vmeLL->dva));
1522                printk(KERN_ERR "Starting: DLV  = %08x\n",
1523                        ioread32(&vmeLL->dlv));
1524                printk(KERN_ERR "Starting: DTBC = %08x\n",
1525                        ioread32(&vmeLL->dtbc));
1526                printk(KERN_ERR "Starting: DCTL = %08x\n",
1527                        ioread32(&vmeLL->dctl));
1528#endif
1529                /* Write registers */
1530                iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
1531                iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
1532                iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
1533                iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
1534                iowrite32(0, ca91cx42_bridge->base + DCPP);
1535        }
1536
1537        /* Start the operation */
1538        iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
1539        val = get_tbl();
1540        iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
1541        return val;
1542}
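
/*
 * Hedged sketch (assumption, not original code): starting a direct mode
 * transfer.  With the CHAIN bit (0x08000000) clear in dgcsreg, the
 * routine above copies the single descriptor into DVA/DLA/DTBC/DCTL and
 * zeroes DCPP before setting GO; the returned timebase tick (get_tbl(),
 * PowerPC) is what ca91cx42_do_dma() stores in vmeDmaStartTick.
 */
static unsigned int example_start_direct(TDMA_Cmd_Packet *desc)
{
        /* channel is unused by ca91cx42_start_dma(); only channel 0 exists */
        return ca91cx42_start_dma(0, 0, desc);
}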
1543
1544TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
1545{
1546        vmeDmaPacket_t *vmeCur;
1547        int maxPerPage;
1548        int currentLLcount;
1549        TDMA_Cmd_Packet *startLL;
1550        TDMA_Cmd_Packet *currentLL;
1551        TDMA_Cmd_Packet *nextLL;
1552        unsigned int dctlreg = 0;
1553
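        /*
         * One page holds PAGE_SIZE / sizeof(TDMA_Cmd_Packet) descriptors;
         * the last slot in each page is deliberately left unused, so the
         * preceding descriptor links straight to a freshly allocated page.
         */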
1554        maxPerPage = PAGE_SIZE / sizeof(TDMA_Cmd_Packet) - 1;
1555        startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
1556        if (startLL == NULL)
1557                return NULL;
1559        /* First allocate pages for descriptors and create linked list */
1560        vmeCur = vmeDma;
1561        currentLL = startLL;
1562        currentLLcount = 0;
1563        while (vmeCur != 0) {
1564                if (vmeCur->pNextPacket != 0) {
1565                        currentLL->dcpp = (unsigned int)(currentLL + 1);
1566                        currentLLcount++;
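                        /*
                         * FIXME: this page allocation is unchecked; if
                         * __get_free_pages() fails here, the descriptor
                         * chain is silently corrupted.
                         */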
1567                        if (currentLLcount >= maxPerPage) {
1568                                currentLL->dcpp =
1569                                    __get_free_pages(GFP_KERNEL, 0);
1570                                currentLLcount = 0;
1571                        }
1572                        currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1573                } else {
1574                        currentLL->dcpp = (unsigned int)0;
1575                }
1576                vmeCur = vmeCur->pNextPacket;
1577        }
1578
1579        /* Next fill in information for each descriptor */
1580        vmeCur = vmeDma;
1581        currentLL = startLL;
1582        while (vmeCur != 0) {
1583                if (vmeCur->srcBus == VME_DMA_VME) {
1584                        iowrite32(vmeCur->srcAddr, &currentLL->dva);
1585                        iowrite32(vmeCur->dstAddr, &currentLL->dlv);
1586                } else {
1587                        iowrite32(vmeCur->srcAddr, &currentLL->dlv);
1588                        iowrite32(vmeCur->dstAddr, &currentLL->dva);
1589                }
1590                uniSetupDctlReg(vmeCur, &dctlreg);
1591                iowrite32(dctlreg, &currentLL->dctl);
1592                iowrite32(vmeCur->byteCount, &currentLL->dtbc);
1593
1594                currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1595                vmeCur = vmeCur->pNextPacket;
1596        }
1597
1598        /* Convert Links to PCI addresses. */
1599        currentLL = startLL;
1600        while (currentLL != 0) {
1601                nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1602                if (nextLL == 0) {
1603                        iowrite32(1, &currentLL->dcpp);
1604                } else {
1605                        iowrite32((unsigned int)virt_to_bus(nextLL),
1606                               &currentLL->dcpp);
1607                }
1608                currentLL = nextLL;
1609        }
1610
1611        /* Return pointer to descriptors list */
1612        return startLL;
1613}
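
/*
 * Hedged usage sketch (assumption, not original code): translating a
 * two-packet chain into Universe descriptors.  ca91cx42_setup_dma()
 * links the descriptors with virt_to_bus() addresses and terminates the
 * chain by writing 1 (the "last packet" marker that ca91cx42_free_dma()
 * tests) into the final dcpp.
 */
static TDMA_Cmd_Packet *example_two_packet_chain(vmeDmaPacket_t *first,
        vmeDmaPacket_t *second)
{
        first->pNextPacket = second;
        second->pNextPacket = NULL;

        /* caller must release the pages with ca91cx42_free_dma() */
        return ca91cx42_setup_dma(first);
}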
1614
1615int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
1616{
1617        TDMA_Cmd_Packet *currentLL;
1618        TDMA_Cmd_Packet *prevLL;
1619        TDMA_Cmd_Packet *nextLL;
1620        unsigned int dcppreg;
1621
1622        /* Convert Links to virtual addresses. */
1623        currentLL = startLL;
1624        while (currentLL != 0) {
1625                dcppreg = ioread32(&currentLL->dcpp);
1626                dcppreg &= ~6;
1627                if (dcppreg & 1) {
1628                        currentLL->dcpp = 0;
1629                } else {
1630                        currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
1631                }
1632                currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1633        }
1634
1635        /* Free all pages associated with the descriptors. */
1636        currentLL = startLL;
1637        prevLL = currentLL;
1638        while (currentLL != 0) {
1639                nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1640                if (currentLL + 1 != nextLL) {
1641                        free_pages((unsigned long)prevLL, 0);
1642                        prevLL = nextLL;
1643                }
1644                currentLL = nextLL;
1645        }
1646
1647        /* Return pointer to descriptors list */
1648        return 0;
1649}
1650
1651int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
1652{
1653        unsigned int dgcsreg = 0;
1654        unsigned int dctlreg = 0;
1655        int val;
1656        int channel, x;
1657        vmeDmaPacket_t *curDma;
1658        TDMA_Cmd_Packet *dmaLL;
1659
1660        /* Sanity check the VME chain. */
1661        channel = vmeDma->channel_number;
1662        if (channel > 0) {
1663                return -EINVAL;
1664        }
1665        curDma = vmeDma;
1666        while (curDma != 0) {
1667                if (curDma->byteCount == 0) {
1668                        return -EINVAL;
1669                }
1670                if (curDma->byteCount >= 0x1000000) {
1671                        return -EINVAL;
1672                }
1673                if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
1674                        return -EINVAL;
1675                }
1676                switch (curDma->srcBus) {
1677                case VME_DMA_PCI:
1678                        if (curDma->dstBus != VME_DMA_VME) {
1679                                return -EINVAL;
1680                        }
1681                        break;
1682                case VME_DMA_VME:
1683                        if (curDma->dstBus != VME_DMA_PCI) {
1684                                return -EINVAL;
1685                        }
1686                        break;
1687                default:
1688                        return -EINVAL;
1690                }
1691                if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
1692                        return -EINVAL;
1693                }
1694
1695                curDma = curDma->pNextPacket;
1696                if (curDma == vmeDma) { /* Endless Loop! */
1697                        return -EINVAL;
1698                }
1699        }
1700
1701        /* calculate control register */
1702        if (vmeDma->pNextPacket != 0) {
1703                dgcsreg = 0x8000000;
1704        } else {
1705                dgcsreg = 0;
1706        }
1707
1708        for (x = 0; x < 8; x++) {       /* vme block size */
1709                if ((256 << x) >= vmeDma->maxVmeBlockSize) {
1710                        break;
1711                }
1712        }
1713        if (x == 8)
1714                x = 7;
1715        dgcsreg |= (x << 20);
1716
1717        if (vmeDma->vmeBackOffTimer) {
1718                for (x = 1; x < 8; x++) {       /* vme timer */
1719                        if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1720                                break;
1721                        }
1722                }
1723                if (x == 8)
1724                        x = 7;
1725                dgcsreg |= (x << 16);
1726        }
1727        /* Setup the dma chain */
1728        dmaLL = ca91cx42_setup_dma(vmeDma);
1729
1730        /* Start the DMA */
1731        if (dgcsreg & 0x8000000) {
1732                vmeDma->vmeDmaStartTick =
1733                    ca91cx42_start_dma(channel, dgcsreg,
1734                                  (TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
1735        } else {
1736                vmeDma->vmeDmaStartTick =
1737                    ca91cx42_start_dma(channel, dgcsreg, dmaLL);
1738        }
1739
1740        wait_event_interruptible(dma_queue,
1741                ioread32(ca91cx42_bridge->base + DGCS) & 0x800);
1742
1743        val = ioread32(ca91cx42_bridge->base + DGCS);
1744        iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);
1745
1746        vmeDma->vmeDmaStatus = 0;
1747
1748        if (!(val & 0x00000800)) {
1749                vmeDma->vmeDmaStatus = val & 0x700;
1750                printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_do_dma"
1751                        " DGCS=%08X\n", val);
1752                val = ioread32(ca91cx42_bridge->base + DCPP);
1753                printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
1754                val = ioread32(ca91cx42_bridge->base + DCTL);
1755                printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
1756                val = ioread32(ca91cx42_bridge->base + DTBC);
1757                printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
1758                val = ioread32(ca91cx42_bridge->base + DLA);
1759                printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
1760                val = ioread32(ca91cx42_bridge->base + DVA);
1761                printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
1762
1763        }
1764        /* Free the dma chain */
1765        ca91cx42_free_dma(dmaLL);
1766
1767        return 0;
1768}
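
/*
 * Hedged usage sketch (assumption, not original code): one blocking
 * PCI-to-VME copy on channel 0.  byteCount must be non-zero and below
 * 0x1000000 bytes, and source and destination must share the same
 * 8-byte alignment, or the sanity checks above return -EINVAL.
 */
static int example_single_copy(unsigned int pci_src, unsigned int vme_dst,
        unsigned int len)
{
        vmeDmaPacket_t pkt = { 0 };

        pkt.channel_number = 0;
        pkt.srcBus = VME_DMA_PCI;
        pkt.srcAddr = pci_src;
        pkt.dstBus = VME_DMA_VME;
        pkt.dstAddr = vme_dst;
        pkt.byteCount = len;
        pkt.dstVmeAttr.maxDataWidth = VME_D32;
        pkt.dstVmeAttr.addrSpace = VME_A32;
        pkt.dstVmeAttr.userAccessType = VME_USER;
        pkt.dstVmeAttr.dataAccessType = VME_DATA;
        pkt.dstVmeAttr.xferProtocol = VME_SCT;
        pkt.pNextPacket = NULL;

        return ca91cx42_do_dma(&pkt);
}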
1769
1770int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
1771{
1772        int temp_ctl = 0;
1773
1774        if (vmeLm->addrU)
1775                return -EINVAL;
1776
1777        switch (vmeLm->addrSpace) {
1778        case VME_A64:
1779        case VME_USER3:
1780        case VME_USER4:
1781                return -EINVAL;
1782        case VME_A16:
1783                temp_ctl |= 0x00000;
1784                break;
1785        case VME_A24:
1786                temp_ctl |= 0x10000;
1787                break;
1788        case VME_A32:
1789                temp_ctl |= 0x20000;
1790                break;
1791        case VME_CRCSR:
1792                temp_ctl |= 0x50000;
1793                break;
1794        case VME_USER1:
1795                temp_ctl |= 0x60000;
1796                break;
1797        case VME_USER2:
1798                temp_ctl |= 0x70000;
1799                break;
1800        }
1801
1802        /* Disable while we are mucking around */
1803        iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1804
1805        iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);
1806
1807        /* Setup CTL register. */
1808        if (vmeLm->userAccessType & VME_SUPER)
1809                temp_ctl |= 0x00200000;
1810        if (vmeLm->userAccessType & VME_USER)
1811                temp_ctl |= 0x00100000;
1812        if (vmeLm->dataAccessType & VME_PROG)
1813                temp_ctl |= 0x00800000;
1814        if (vmeLm->dataAccessType & VME_DATA)
1815                temp_ctl |= 0x00400000;
1816
1818        /* Write ctl reg and enable */
1819        iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
1820        temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);
1821
1822        return 0;
1823}
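
/*
 * Hedged usage sketch (assumption, not original code): arming the
 * location monitor on an A24 address for non-privileged data cycles.
 * ca91cx42_lm_set() itself sets the LM_CTL enable bit (0x80000000);
 * the upper address word must be zero because the Universe location
 * monitor cannot decode A64.
 */
static int example_arm_location_monitor(unsigned int vme_addr)
{
        vmeLmCfg_t lm = { 0 };

        lm.addr = vme_addr;
        lm.addrU = 0;                   /* non-zero is rejected above */
        lm.addrSpace = VME_A24;
        lm.userAccessType = VME_USER;
        lm.dataAccessType = VME_DATA;

        return ca91cx42_lm_set(&lm);
}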
1824
1825int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
1826{
1827        unsigned long flags;
1828        unsigned int tmp = 0;   /* "not yet triggered" by default */
1829
1830        /*
1831         * FIXME: the location monitor trigger status should be sampled
1832         * here under lm_lock before deciding whether to sleep; no such
1833         * read is implemented, so the monitor is assumed not to have
1834         * fired.  Note also that interruptible_sleep_on_timeout() is
1835         * racy and deprecated in favour of the wait_event_* interfaces.
1836         */
1837        spin_lock_irqsave(&lm_lock, flags);
1838        spin_unlock_irqrestore(&lm_lock, flags);
1832        if (tmp == 0) {
1833                if (vmeLm->lmWait < 10)
1834                        vmeLm->lmWait = 10;
1835                interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
1836        }
1837        iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1838
1839        return 0;
1840}
1841
1844int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
1845{
1846        int temp_ctl = 0;
1847        int vbto = 0;
1848
1849        temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1850        temp_ctl &= 0x00FFFFFF;
1851
1852        if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
1853                vbto = 7;
1854        } else if (vmeArb->globalTimeoutTimer > 1024) {
1855                return -EINVAL;
1856        } else if (vmeArb->globalTimeoutTimer == 0) {
1857                vbto = 0;
1858        } else {
1859                vbto = 1;
1860                while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
1861                        vbto += 1;
1862        }
1863        temp_ctl |= (vbto << 28);
1864
1865        if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
1866                temp_ctl |= 1 << 26;
1867
1868        if (vmeArb->arbiterTimeoutFlag)
1869                temp_ctl |= 2 << 24;
1870
1871        iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);
1872        return 0;
1873}
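
/*
 * Hedged example (assumption, not original code): the VBTO search above
 * walks 16, 32, 64, ... time units, so a requested global timeout of 64
 * encodes as vbto = 3 (16 * (1 << 2) = 64) in the top nibble of
 * MISC_CTL.
 */
static int example_arbiter_timeout_64(void)
{
        vmeArbiterCfg_t arb = { 0 };

        arb.globalTimeoutTimer = 64;    /* assumed to be in microseconds */
        arb.arbiterMode = VME_PRIORITY_MODE;
        arb.arbiterTimeoutFlag = 1;     /* enable the arbitration timer */

        return ca91cx42_set_arbiter(&arb);
}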
1874
1875int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
1876{
1877        int temp_ctl = 0;
1878        int vbto = 0;
1879
1880        temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1881
1882        vbto = (temp_ctl >> 28) & 0xF;
1883        vmeArb->globalTimeoutTimer =
1884                (vbto != 0) ? (16 * (1 << (vbto - 1))) : 0;
1885
1886        if (temp_ctl & (1 << 26))
1887                vmeArb->arbiterMode = VME_PRIORITY_MODE;
1888        else
1889                vmeArb->arbiterMode = VME_R_ROBIN_MODE;
1890
1891        vmeArb->arbiterTimeoutFlag = (temp_ctl & (3 << 24)) ? 1 : 0;
1893
1894        return 0;
1895}
1896
1897int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
1898{
1899        int temp_ctl = 0;
1900
1901        temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1902        temp_ctl &= 0xFF0FFFFF;
1903
1904        if (vmeReq->releaseMode == 1)
1905                temp_ctl |= (1 << 20);
1906
1907        if (vmeReq->fairMode == 1)
1908                temp_ctl |= (1 << 21);
1909
1910        temp_ctl |= (vmeReq->requestLevel & 3) << 22;
1911
1912        iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);
1913        return 0;
1914}
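
/*
 * Hedged usage sketch (assumption, not original code): requesting the
 * bus at level 3 in fair mode.  Bits 20 (release mode), 21 (fair mode)
 * and 22-23 (request level) of MAST_CTL are rewritten above, and the
 * matching ca91cx42_get_requestor() reads the same bits back.
 */
static int example_requestor_fair_level3(void)
{
        vmeRequesterCfg_t req = { 0 };

        req.releaseMode = 1;    /* release the bus when done */
        req.fairMode = 1;
        req.requestLevel = 3;

        return ca91cx42_set_requestor(&req);
}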
1915
1916int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
1917{
1918        int temp_ctl = 0;
1919
1920        temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1921
1922        vmeReq->releaseMode = (temp_ctl & (1 << 20)) ? 1 : 0;
1923
1924        vmeReq->fairMode = (temp_ctl & (1 << 21)) ? 1 : 0;
1925
1926        vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;
1929
1930        return 0;
1931}
1932
1934#endif
1935