linux/drivers/staging/vme/bridges/vme_tsi148.c
   1/*
   2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
   3 *
   4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
   5 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by Tom Armistead and Ajit Prem
   8 * Copyright 2004 Motorola Inc.
   9 *
  10 * This program is free software; you can redistribute  it and/or modify it
  11 * under  the terms of  the GNU General  Public License as published by the
  12 * Free Software Foundation;  either version 2 of the  License, or (at your
  13 * option) any later version.
  14 */
  15
  16#include <linux/version.h>
  17#include <linux/module.h>
  18#include <linux/moduleparam.h>
  19#include <linux/mm.h>
  20#include <linux/types.h>
  21#include <linux/errno.h>
  22#include <linux/proc_fs.h>
  23#include <linux/pci.h>
  24#include <linux/poll.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/interrupt.h>
  27#include <linux/spinlock.h>
  28#include <linux/sched.h>
  29#include <asm/time.h>
  30#include <asm/io.h>
  31#include <asm/uaccess.h>
  32
  33#include "../vme.h"
  34#include "../vme_bridge.h"
  35#include "vme_tsi148.h"
  36
  37static int __init tsi148_init(void);
  38static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
  39static void tsi148_remove(struct pci_dev *);
  40static void __exit tsi148_exit(void);
  41
  42
  43int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
  44        unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
  45int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
  46        unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
  47
  48int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
  49        unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
  50int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
  51        unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
  52ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
  53        loff_t);
  54ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
  55        loff_t);
  56unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
  57        unsigned int, unsigned int, loff_t);
  58int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
  59        struct vme_dma_attr *, size_t);
  60int tsi148_dma_list_exec(struct vme_dma_list *);
  61int tsi148_dma_list_empty(struct vme_dma_list *);
  62int tsi148_generate_irq(int, int);
  63int tsi148_slot_get(void);
  64
   65/* Module parameter: enable VME bus error checking on master reads/writes */
  66int err_chk = 0;
  67
  68/* XXX These should all be in a per device structure */
  69struct vme_bridge *tsi148_bridge;
  70wait_queue_head_t dma_queue[2];
  71wait_queue_head_t iack_queue;
  72void (*lm_callback[4])(int);    /* Called in interrupt handler, be careful! */
  73void *crcsr_kernel;
  74dma_addr_t crcsr_bus;
  75struct vme_master_resource *flush_image;
  76struct mutex vme_rmw;   /* Only one RMW cycle at a time */
  77struct mutex vme_int;   /*
  78                                 * Only one VME interrupt can be
  79                                 * generated at a time, provide locking
  80                                 */
  81struct mutex vme_irq;   /* Locking for VME irq callback configuration */
  82
  83
  84static char driver_name[] = "vme_tsi148";
  85
  86static struct pci_device_id tsi148_ids[] = {
  87        { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
  88        { },
  89};
  90
  91static struct pci_driver tsi148_driver = {
  92        .name = driver_name,
  93        .id_table = tsi148_ids,
  94        .probe = tsi148_probe,
  95        .remove = tsi148_remove,
  96};
  97
  98static void reg_join(unsigned int high, unsigned int low,
  99        unsigned long long *variable)
 100{
 101        *variable = (unsigned long long)high << 32;
 102        *variable |= (unsigned long long)low;
 103}
 104
 105static void reg_split(unsigned long long variable, unsigned int *high,
 106        unsigned int *low)
 107{
 108        *low = (unsigned int)variable & 0xFFFFFFFF;
 109        *high = (unsigned int)(variable >> 32);
 110}
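/*
 * Example (illustrative): reg_split(0x0000000180010000ULL, &hi, &lo) yields
 * hi = 0x00000001 and lo = 0x80010000; reg_join(hi, lo, &val) recombines
 * them into the original 64-bit value.
 */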
 111
 112/*
 113 * Wakes up DMA queue.
 114 */
 115static u32 tsi148_DMA_irqhandler(int channel_mask)
 116{
 117        u32 serviced = 0;
 118
 119        if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
 120                wake_up(&dma_queue[0]);
 121                serviced |= TSI148_LCSR_INTC_DMA0C;
 122        }
 123        if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
 124                wake_up(&dma_queue[1]);
 125                serviced |= TSI148_LCSR_INTC_DMA1C;
 126        }
 127
 128        return serviced;
 129}
 130
 131/*
 132 * Wake up location monitor queue
 133 */
 134static u32 tsi148_LM_irqhandler(u32 stat)
 135{
 136        int i;
 137        u32 serviced = 0;
 138
 139        for (i = 0; i < 4; i++) {
  140                if (stat & TSI148_LCSR_INTS_LMS[i]) {
 141                        /* We only enable interrupts if the callback is set */
 142                        lm_callback[i](i);
 143                        serviced |= TSI148_LCSR_INTC_LMC[i];
 144                }
 145        }
 146
 147        return serviced;
 148}
 149
 150/*
 151 * Wake up mail box queue.
 152 *
  153 * XXX This functionality is not exposed up through the API.
 154 */
 155static u32 tsi148_MB_irqhandler(u32 stat)
 156{
 157        int i;
 158        u32 val;
 159        u32 serviced = 0;
 160
 161        for (i = 0; i < 4; i++) {
  162                if (stat & TSI148_LCSR_INTS_MBS[i]) {
 163                        val = ioread32be(tsi148_bridge->base +
 164                                TSI148_GCSR_MBOX[i]);
 165                        printk("VME Mailbox %d received: 0x%x\n", i, val);
 166                        serviced |= TSI148_LCSR_INTC_MBC[i];
 167                }
 168        }
 169
 170        return serviced;
 171}
 172
 173/*
 174 * Display error & status message when PERR (PCI) exception interrupt occurs.
 175 */
 176static u32 tsi148_PERR_irqhandler(void)
 177{
 178        printk(KERN_ERR
 179                "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
 180                ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAU),
 181                ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAL),
 182                ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAT)
 183                );
 184        printk(KERN_ERR
 185                "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
 186                ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXA),
 187                ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXS)
 188                );
 189
 190        iowrite32be(TSI148_LCSR_EDPAT_EDPCL,
 191                tsi148_bridge->base + TSI148_LCSR_EDPAT);
 192
 193        return TSI148_LCSR_INTC_PERRC;
 194}
 195
 196/*
 197 * Save address and status when VME error interrupt occurs.
 198 */
 199static u32 tsi148_VERR_irqhandler(void)
 200{
 201        unsigned int error_addr_high, error_addr_low;
 202        unsigned long long error_addr;
 203        u32 error_attrib;
 204        struct vme_bus_error *error;
 205
 206        error_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAU);
 207        error_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAL);
 208        error_attrib = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAT);
 209
 210        reg_join(error_addr_high, error_addr_low, &error_addr);
 211
 212        /* Check for exception register overflow (we have lost error data) */
  213        if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
 214                printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
 215        }
 216
  217        error = kmalloc(sizeof(struct vme_bus_error),
 218                GFP_ATOMIC);
 219        if (error) {
 220                error->address = error_addr;
 221                error->attributes = error_attrib;
 222                list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
 223        } else {
 224                printk(KERN_ERR
 225                        "Unable to alloc memory for VMEbus Error reporting\n");
 226                printk(KERN_ERR
 227                        "VME Bus Error at address: 0x%llx, attributes: %08x\n",
 228                        error_addr, error_attrib);
 229        }
 230
 231        /* Clear Status */
 232        iowrite32be(TSI148_LCSR_VEAT_VESCL,
 233                tsi148_bridge->base + TSI148_LCSR_VEAT);
 234
 235        return TSI148_LCSR_INTC_VERRC;
 236}
 237
 238/*
 239 * Wake up IACK queue.
 240 */
 241static u32 tsi148_IACK_irqhandler(void)
 242{
 243        printk("tsi148_IACK_irqhandler\n");
 244        wake_up(&iack_queue);
 245
 246        return TSI148_LCSR_INTC_IACKC;
 247}
 248
 249/*
  250 * Call the VME bus interrupt callback if one is provided.
 251 */
 252static u32 tsi148_VIRQ_irqhandler(u32 stat)
 253{
 254        int vec, i, serviced = 0;
 255        void (*call)(int, int, void *);
 256        void *priv_data;
 257
 258        for (i = 7; i > 0; i--) {
 259                if (stat & (1 << i)) {
 260                        /*
 261                         *      Note:   Even though the registers are defined
 262                         *      as 32-bits in the spec, we only want to issue
 263                         *      8-bit IACK cycles on the bus, read from offset
 264                         *      3.
 265                         */
 266                        vec = ioread8(tsi148_bridge->base +
 267                                TSI148_LCSR_VIACK[i] + 3);
 268
 269                        call = tsi148_bridge->irq[i - 1].callback[vec].func;
 270                        priv_data =
 271                                tsi148_bridge->irq[i-1].callback[vec].priv_data;
 272
 273                        if (call != NULL)
 274                                call(i, vec, priv_data);
 275                        else
  276                                printk(KERN_ERR "Spurious VME interrupt, level:%x, "
 277                                        "vector:%x\n", i, vec);
 278
 279                        serviced |= (1 << i);
 280                }
 281        }
 282
 283        return serviced;
 284}
 285
 286/*
 287 * Top level interrupt handler.  Clears appropriate interrupt status bits and
 288 * then calls appropriate sub handler(s).
 289 */
 290static irqreturn_t tsi148_irqhandler(int irq, void *dev_id)
 291{
 292        u32 stat, enable, serviced = 0;
 293
 294        /* Determine which interrupts are unmasked and set */
 295        enable = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
 296        stat = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTS);
 297
 298        /* Only look at unmasked interrupts */
 299        stat &= enable;
 300
 301        if (unlikely(!stat)) {
 302                return IRQ_NONE;
 303        }
 304
 305        /* Call subhandlers as appropriate */
 306        /* DMA irqs */
 307        if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
 308                serviced |= tsi148_DMA_irqhandler(stat);
 309
 310        /* Location monitor irqs */
 311        if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
 312                        TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
 313                serviced |= tsi148_LM_irqhandler(stat);
 314
 315        /* Mail box irqs */
 316        if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
 317                        TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
 318                serviced |= tsi148_MB_irqhandler(stat);
 319
 320        /* PCI bus error */
 321        if (stat & TSI148_LCSR_INTS_PERRS)
 322                serviced |= tsi148_PERR_irqhandler();
 323
 324        /* VME bus error */
 325        if (stat & TSI148_LCSR_INTS_VERRS)
 326                serviced |= tsi148_VERR_irqhandler();
 327
 328        /* IACK irq */
 329        if (stat & TSI148_LCSR_INTS_IACKS)
 330                serviced |= tsi148_IACK_irqhandler();
 331
 332        /* VME bus irqs */
 333        if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
 334                        TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
 335                        TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
 336                        TSI148_LCSR_INTS_IRQ1S))
 337                serviced |= tsi148_VIRQ_irqhandler(stat);
 338
 339        /* Clear serviced interrupts */
 340        iowrite32be(serviced, tsi148_bridge->base + TSI148_LCSR_INTC);
 341
 342        return IRQ_HANDLED;
 343}
 344
 345static int tsi148_irq_init(struct vme_bridge *bridge)
 346{
 347        int result;
 348        unsigned int tmp;
 349        struct pci_dev *pdev;
 350
 351        /* Need pdev */
 352        pdev = container_of(bridge->parent, struct pci_dev, dev);
 353
 354        /* Initialise list for VME bus errors */
 355        INIT_LIST_HEAD(&(bridge->vme_errors));
 356
 357        result = request_irq(pdev->irq,
 358                             tsi148_irqhandler,
 359                             IRQF_SHARED,
 360                             driver_name, pdev);
 361        if (result) {
 362                dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
 363                        pdev->irq);
 364                return result;
 365        }
 366
 367        /* Enable and unmask interrupts */
 368        tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
 369                TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
 370                TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
 371                TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
 372                TSI148_LCSR_INTEO_IACKEO;
 373
 374        /* XXX This leaves the following interrupts masked.
 375         * TSI148_LCSR_INTEO_VIEEO
 376         * TSI148_LCSR_INTEO_SYSFLEO
 377         * TSI148_LCSR_INTEO_ACFLEO
 378         */
 379
 380        /* Don't enable Location Monitor interrupts here - they will be
 381         * enabled when the location monitors are properly configured and
 382         * a callback has been attached.
 383         * TSI148_LCSR_INTEO_LM0EO
 384         * TSI148_LCSR_INTEO_LM1EO
 385         * TSI148_LCSR_INTEO_LM2EO
 386         * TSI148_LCSR_INTEO_LM3EO
 387         */
 388
  389        /* Don't enable VME interrupts until a handler has been added - the
  390         * board would otherwise respond to interrupts that we don't yet know
  391         * how to deal with properly.
 392         * TSI148_LCSR_INTEO_IRQ7EO
 393         * TSI148_LCSR_INTEO_IRQ6EO
 394         * TSI148_LCSR_INTEO_IRQ5EO
 395         * TSI148_LCSR_INTEO_IRQ4EO
 396         * TSI148_LCSR_INTEO_IRQ3EO
 397         * TSI148_LCSR_INTEO_IRQ2EO
 398         * TSI148_LCSR_INTEO_IRQ1EO
 399         */
 400
 401        iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
 402        iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
 403
 404        return 0;
 405}
 406
 407static void tsi148_irq_exit(struct pci_dev *pdev)
 408{
 409        /* Turn off interrupts */
 410        iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
 411        iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEN);
 412
 413        /* Clear all interrupts */
 414        iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
 415
 416        /* Detach interrupt handler */
 417        free_irq(pdev->irq, pdev);
 418}
 419
 420/*
  421 * Check to see if an IACK has been received, return true (1) or false (0).
 422 */
 423int tsi148_iack_received(void)
 424{
 425        u32 tmp;
 426
 427        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
 428
 429        if (tmp & TSI148_LCSR_VICR_IRQS)
 430                return 0;
 431        else
 432                return 1;
 433}
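/*
 * Note: this is used as the wait condition in tsi148_generate_irq(); the
 * IACK interrupt handler wakes iack_queue once the acknowledge cycle has
 * completed.
 */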
 434
 435/*
  436 * Set up a VME interrupt
 437 */
 438int tsi148_request_irq(int level, int statid,
 439        void (*callback)(int level, int vector, void *priv_data),
 440        void *priv_data)
 441{
 442        u32 tmp;
 443
 444        mutex_lock(&(vme_irq));
 445
  446        if (tsi148_bridge->irq[level - 1].callback[statid].func) {
 447                mutex_unlock(&(vme_irq));
 448                printk("VME Interrupt already taken\n");
 449                return -EBUSY;
 450        }
 451
 452
 453        tsi148_bridge->irq[level - 1].count++;
 454        tsi148_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
 455        tsi148_bridge->irq[level - 1].callback[statid].func = callback;
 456
 457        /* Enable IRQ level */
 458        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
 459        tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
 460        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
 461
 462        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
 463        tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
 464        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
 465
 466        mutex_unlock(&(vme_irq));
 467
 468        return 0;
 469}
 470
 471/*
 472 * Free VME interrupt
 473 */
 474void tsi148_free_irq(int level, int statid)
 475{
 476        u32 tmp;
 477        struct pci_dev *pdev;
 478
 479        mutex_lock(&(vme_irq));
 480
 481        tsi148_bridge->irq[level - 1].count--;
 482
  483        /* Disable IRQ level if no more interrupts attached at this level */
 484        if (tsi148_bridge->irq[level - 1].count == 0) {
 485                tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
 486                tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
 487                iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
 488
 489                tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
 490                tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
 491                iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
 492
 493                pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
 494
 495                synchronize_irq(pdev->irq);
 496        }
 497
 498        tsi148_bridge->irq[level - 1].callback[statid].func = NULL;
 499        tsi148_bridge->irq[level - 1].callback[statid].priv_data = NULL;
 500
 501        mutex_unlock(&(vme_irq));
 502}
 503
 504/*
 505 * Generate a VME bus interrupt at the requested level & vector. Wait for
 506 * interrupt to be acked.
 507 */
 508int tsi148_generate_irq(int level, int statid)
 509{
 510        u32 tmp;
 511
 512        mutex_lock(&(vme_int));
 513
 514        /* Read VICR register */
 515        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
 516
 517        /* Set Status/ID */
 518        tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
 519                (statid & TSI148_LCSR_VICR_STID_M);
 520        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
 521
 522        /* Assert VMEbus IRQ */
 523        tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
 524        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
 525
 526        /* XXX Consider implementing a timeout? */
 527        wait_event_interruptible(iack_queue, tsi148_iack_received());
 528
 529        mutex_unlock(&(vme_int));
 530
 531        return 0;
 532}
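/*
 * Example (illustrative): tsi148_generate_irq(3, 0xAA) programs 0xAA as the
 * Status/ID value, asserts VME IRQ3 via VICR and sleeps on iack_queue until
 * tsi148_iack_received() reports that the IACK cycle has completed.
 */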
 533
 534/*
 535 * Find the first error in this address range
 536 */
 537static struct vme_bus_error *tsi148_find_error(vme_address_t aspace,
 538        unsigned long long address, size_t count)
 539{
 540        struct list_head *err_pos;
 541        struct vme_bus_error *vme_err, *valid = NULL;
 542        unsigned long long bound;
 543
 544        bound = address + count;
 545
 546        /*
 547         * XXX We are currently not looking at the address space when parsing
 548         *     for errors. This is because parsing the Address Modifier Codes
 549         *     is going to be quite resource intensive to do properly. We
 550         *     should be OK just looking at the addresses and this is certainly
 551         *     much better than what we had before.
 552         */
 553        err_pos = NULL;
 554        /* Iterate through errors */
 555        list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
 556                vme_err = list_entry(err_pos, struct vme_bus_error, list);
  557                if ((vme_err->address >= address) && (vme_err->address < bound)) {
 558                        valid = vme_err;
 559                        break;
 560                }
 561        }
 562
 563        return valid;
 564}
 565
 566/*
 567 * Clear errors in the provided address range.
 568 */
 569static void tsi148_clear_errors(vme_address_t aspace,
 570        unsigned long long address, size_t count)
 571{
 572        struct list_head *err_pos, *temp;
 573        struct vme_bus_error *vme_err;
 574        unsigned long long bound;
 575
 576        bound = address + count;
 577
 578        /*
 579         * XXX We are currently not looking at the address space when parsing
 580         *     for errors. This is because parsing the Address Modifier Codes
 581         *     is going to be quite resource intensive to do properly. We
 582         *     should be OK just looking at the addresses and this is certainly
 583         *     much better than what we had before.
 584         */
 585        err_pos = NULL;
 586        /* Iterate through errors */
 587        list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
 588                vme_err = list_entry(err_pos, struct vme_bus_error, list);
 589
  590                if ((vme_err->address >= address) && (vme_err->address < bound)) {
 591                        list_del(err_pos);
 592                        kfree(vme_err);
 593                }
 594        }
 595}
 596
 597/*
 598 * Initialize a slave window with the requested attributes.
 599 */
 600int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
 601        unsigned long long vme_base, unsigned long long size,
 602        dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
 603{
 604        unsigned int i, addr = 0, granularity = 0;
 605        unsigned int temp_ctl = 0;
 606        unsigned int vme_base_low, vme_base_high;
 607        unsigned int vme_bound_low, vme_bound_high;
 608        unsigned int pci_offset_low, pci_offset_high;
 609        unsigned long long vme_bound, pci_offset;
 610
 611#if 0
 612        printk("Set slave image %d to:\n", image->number);
 613        printk("\tEnabled: %s\n", (enabled == 1)? "yes" : "no");
 614        printk("\tVME Base:0x%llx\n", vme_base);
 615        printk("\tWindow Size:0x%llx\n", size);
 616        printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base);
 617        printk("\tAddress Space:0x%x\n", aspace);
 618        printk("\tTransfer Cycle Properties:0x%x\n", cycle);
 619#endif
 620
 621        i = image->number;
 622
 623        switch (aspace) {
 624        case VME_A16:
 625                granularity = 0x10;
 626                addr |= TSI148_LCSR_ITAT_AS_A16;
 627                break;
 628        case VME_A24:
 629                granularity = 0x1000;
 630                addr |= TSI148_LCSR_ITAT_AS_A24;
 631                break;
 632        case VME_A32:
 633                granularity = 0x10000;
 634                addr |= TSI148_LCSR_ITAT_AS_A32;
 635                break;
 636        case VME_A64:
 637                granularity = 0x10000;
 638                addr |= TSI148_LCSR_ITAT_AS_A64;
 639                break;
 640        case VME_CRCSR:
 641        case VME_USER1:
 642        case VME_USER2:
 643        case VME_USER3:
 644        case VME_USER4:
 645        default:
 646                printk("Invalid address space\n");
 647                return -EINVAL;
 648                break;
 649        }
 650
 651        /* Convert 64-bit variables to 2x 32-bit variables */
 652        reg_split(vme_base, &vme_base_high, &vme_base_low);
 653
 654        /*
 655         * Bound address is a valid address for the window, adjust
 656         * accordingly
 657         */
 658        vme_bound = vme_base + size - granularity;
 659        reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
 660        pci_offset = (unsigned long long)pci_base - vme_base;
 661        reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
 662
 663        if (vme_base_low & (granularity - 1)) {
 664                printk("Invalid VME base alignment\n");
 665                return -EINVAL;
 666        }
 667        if (vme_bound_low & (granularity - 1)) {
 668                printk("Invalid VME bound alignment\n");
 669                return -EINVAL;
 670        }
 671        if (pci_offset_low & (granularity - 1)) {
 672                printk("Invalid PCI Offset alignment\n");
 673                return -EINVAL;
 674        }
 675
 676#if 0
 677        printk("\tVME Bound:0x%llx\n", vme_bound);
 678        printk("\tPCI Offset:0x%llx\n", pci_offset);
 679#endif
 680
 681        /*  Disable while we are mucking around */
 682        temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 683                TSI148_LCSR_OFFSET_ITAT);
 684        temp_ctl &= ~TSI148_LCSR_ITAT_EN;
 685        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 686                TSI148_LCSR_OFFSET_ITAT);
 687
 688        /* Setup mapping */
 689        iowrite32be(vme_base_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 690                TSI148_LCSR_OFFSET_ITSAU);
 691        iowrite32be(vme_base_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 692                TSI148_LCSR_OFFSET_ITSAL);
 693        iowrite32be(vme_bound_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 694                TSI148_LCSR_OFFSET_ITEAU);
 695        iowrite32be(vme_bound_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 696                TSI148_LCSR_OFFSET_ITEAL);
 697        iowrite32be(pci_offset_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 698                TSI148_LCSR_OFFSET_ITOFU);
 699        iowrite32be(pci_offset_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 700                TSI148_LCSR_OFFSET_ITOFL);
 701
 702/* XXX Prefetch stuff currently unsupported */
 703#if 0
 704
 705        for (x = 0; x < 4; x++) {
 706                if ((64 << x) >= vmeIn->prefetchSize) {
 707                        break;
 708                }
 709        }
 710        if (x == 4)
 711                x--;
 712        temp_ctl |= (x << 16);
 713
 714        if (vmeIn->prefetchThreshold)
 715                if (vmeIn->prefetchThreshold)
 716                        temp_ctl |= 0x40000;
 717#endif
 718
 719        /* Setup 2eSST speeds */
 720        temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
 721        switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
 722        case VME_2eSST160:
 723                temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
 724                break;
 725        case VME_2eSST267:
 726                temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
 727                break;
 728        case VME_2eSST320:
 729                temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
 730                break;
 731        }
 732
 733        /* Setup cycle types */
 734        temp_ctl &= ~(0x1F << 7);
 735        if (cycle & VME_BLT)
 736                temp_ctl |= TSI148_LCSR_ITAT_BLT;
 737        if (cycle & VME_MBLT)
 738                temp_ctl |= TSI148_LCSR_ITAT_MBLT;
 739        if (cycle & VME_2eVME)
 740                temp_ctl |= TSI148_LCSR_ITAT_2eVME;
 741        if (cycle & VME_2eSST)
 742                temp_ctl |= TSI148_LCSR_ITAT_2eSST;
 743        if (cycle & VME_2eSSTB)
 744                temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
 745
 746        /* Setup address space */
 747        temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
 748        temp_ctl |= addr;
 749
  750        temp_ctl &= ~0xF;       /* Clear SUPR/NPRIV/PGM/DATA access bits */
  751        if (cycle & VME_SUPER)
  752                temp_ctl |= TSI148_LCSR_ITAT_SUPR;
 753        if (cycle & VME_USER)
 754                temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
 755        if (cycle & VME_PROG)
 756                temp_ctl |= TSI148_LCSR_ITAT_PGM;
 757        if (cycle & VME_DATA)
 758                temp_ctl |= TSI148_LCSR_ITAT_DATA;
 759
 760        /* Write ctl reg without enable */
 761        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 762                TSI148_LCSR_OFFSET_ITAT);
 763
 764        if (enabled)
 765                temp_ctl |= TSI148_LCSR_ITAT_EN;
 766
 767        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
 768                TSI148_LCSR_OFFSET_ITAT);
 769
 770        return 0;
 771}
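/*
 * Example (illustrative): a 1MB A24 slave window at VME address 0x200000
 * backed by PCI address pci_base must be 4KB (0x1000) aligned and programs
 * ITSA = 0x200000, ITEA = 0x2FF000 (base + size - granularity) and
 * ITOF = pci_base - 0x200000.
 */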
 772
 773/*
 774 * Get slave window configuration.
 775 *
 776 * XXX Prefetch currently unsupported.
 777 */
 778int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
 779        unsigned long long *vme_base, unsigned long long *size,
 780        dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
 781{
 782        unsigned int i, granularity = 0, ctl = 0;
 783        unsigned int vme_base_low, vme_base_high;
 784        unsigned int vme_bound_low, vme_bound_high;
 785        unsigned int pci_offset_low, pci_offset_high;
 786        unsigned long long vme_bound, pci_offset;
 787
 788
 789        i = image->number;
 790
 791        /* Read registers */
 792        ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 793                TSI148_LCSR_OFFSET_ITAT);
 794
 795        vme_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 796                TSI148_LCSR_OFFSET_ITSAU);
 797        vme_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 798                TSI148_LCSR_OFFSET_ITSAL);
 799        vme_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 800                TSI148_LCSR_OFFSET_ITEAU);
 801        vme_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 802                TSI148_LCSR_OFFSET_ITEAL);
 803        pci_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 804                TSI148_LCSR_OFFSET_ITOFU);
 805        pci_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
 806                TSI148_LCSR_OFFSET_ITOFL);
 807
 808        /* Convert 64-bit variables to 2x 32-bit variables */
 809        reg_join(vme_base_high, vme_base_low, vme_base);
 810        reg_join(vme_bound_high, vme_bound_low, &vme_bound);
 811        reg_join(pci_offset_high, pci_offset_low, &pci_offset);
 812
 813        *pci_base = (dma_addr_t)vme_base + pci_offset;
 814
 815        *enabled = 0;
 816        *aspace = 0;
 817        *cycle = 0;
 818
 819        if (ctl & TSI148_LCSR_ITAT_EN)
 820                *enabled = 1;
 821
 822        if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
 823                granularity = 0x10;
 824                *aspace |= VME_A16;
 825        }
 826        if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
 827                granularity = 0x1000;
 828                *aspace |= VME_A24;
 829        }
 830        if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
 831                granularity = 0x10000;
 832                *aspace |= VME_A32;
 833        }
 834        if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
 835                granularity = 0x10000;
 836                *aspace |= VME_A64;
 837        }
 838
 839        /* Need granularity before we set the size */
 840        *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 841
 842
 843        if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
 844                *cycle |= VME_2eSST160;
 845        if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
 846                *cycle |= VME_2eSST267;
 847        if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
 848                *cycle |= VME_2eSST320;
 849
 850        if (ctl & TSI148_LCSR_ITAT_BLT)
 851                *cycle |= VME_BLT;
 852        if (ctl & TSI148_LCSR_ITAT_MBLT)
 853                *cycle |= VME_MBLT;
 854        if (ctl & TSI148_LCSR_ITAT_2eVME)
 855                *cycle |= VME_2eVME;
 856        if (ctl & TSI148_LCSR_ITAT_2eSST)
 857                *cycle |= VME_2eSST;
 858        if (ctl & TSI148_LCSR_ITAT_2eSSTB)
 859                *cycle |= VME_2eSSTB;
 860
 861        if (ctl & TSI148_LCSR_ITAT_SUPR)
 862                *cycle |= VME_SUPER;
 863        if (ctl & TSI148_LCSR_ITAT_NPRIV)
 864                *cycle |= VME_USER;
 865        if (ctl & TSI148_LCSR_ITAT_PGM)
 866                *cycle |= VME_PROG;
 867        if (ctl & TSI148_LCSR_ITAT_DATA)
 868                *cycle |= VME_DATA;
 869
 870        return 0;
 871}
 872
 873/*
 874 * Allocate and map PCI Resource
 875 */
 876static int tsi148_alloc_resource(struct vme_master_resource *image,
 877        unsigned long long size)
 878{
 879        unsigned long long existing_size;
 880        int retval = 0;
 881        struct pci_dev *pdev;
 882
 883        /* Find pci_dev container of dev */
 884        if (tsi148_bridge->parent == NULL) {
 885                printk("Dev entry NULL\n");
 886                return -EINVAL;
 887        }
 888        pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
 889
 890        existing_size = (unsigned long long)(image->pci_resource.end -
 891                image->pci_resource.start);
 892
 893        /* If the existing size is OK, return */
 894        if (existing_size == (size - 1))
 895                return 0;
 896
 897        if (existing_size != 0) {
 898                iounmap(image->kern_base);
 899                image->kern_base = NULL;
 900                if (image->pci_resource.name != NULL)
 901                        kfree(image->pci_resource.name);
 902                release_resource(&(image->pci_resource));
 903                memset(&(image->pci_resource), 0, sizeof(struct resource));
 904        }
 905
 906        if (image->pci_resource.name == NULL) {
 907                image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
 908                if (image->pci_resource.name == NULL) {
 909                        printk(KERN_ERR "Unable to allocate memory for resource"
 910                                " name\n");
 911                        retval = -ENOMEM;
 912                        goto err_name;
 913                }
 914        }
 915
 916        sprintf((char *)image->pci_resource.name, "%s.%d", tsi148_bridge->name,
 917                image->number);
 918
 919        image->pci_resource.start = 0;
 920        image->pci_resource.end = (unsigned long)size;
 921        image->pci_resource.flags = IORESOURCE_MEM;
 922
 923        retval = pci_bus_alloc_resource(pdev->bus,
 924                &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
 925                0, NULL, NULL);
 926        if (retval) {
 927                printk(KERN_ERR "Failed to allocate mem resource for "
 928                        "window %d size 0x%lx start 0x%lx\n",
 929                        image->number, (unsigned long)size,
 930                        (unsigned long)image->pci_resource.start);
 931                goto err_resource;
 932        }
 933
 934        image->kern_base = ioremap_nocache(
 935                image->pci_resource.start, size);
 936        if (image->kern_base == NULL) {
 937                printk(KERN_ERR "Failed to remap resource\n");
 938                retval = -ENOMEM;
 939                goto err_remap;
 940        }
 941
 942        return 0;
 943
 944        iounmap(image->kern_base);
 945        image->kern_base = NULL;
 946err_remap:
 947        release_resource(&(image->pci_resource));
 948err_resource:
 949        kfree(image->pci_resource.name);
 950        memset(&(image->pci_resource), 0, sizeof(struct resource));
 951err_name:
 952        return retval;
 953}
 954
 955/*
 956 * Free and unmap PCI Resource
 957 */
 958static void tsi148_free_resource(struct vme_master_resource *image)
 959{
 960        iounmap(image->kern_base);
 961        image->kern_base = NULL;
 962        release_resource(&(image->pci_resource));
 963        kfree(image->pci_resource.name);
 964        memset(&(image->pci_resource), 0, sizeof(struct resource));
 965}
 966
 967/*
 968 * Set the attributes of an outbound window.
 969 */
 970int tsi148_master_set( struct vme_master_resource *image, int enabled,
 971        unsigned long long vme_base, unsigned long long size,
 972        vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
 973{
 974        int retval = 0;
 975        unsigned int i;
 976        unsigned int temp_ctl = 0;
 977        unsigned int pci_base_low, pci_base_high;
 978        unsigned int pci_bound_low, pci_bound_high;
 979        unsigned int vme_offset_low, vme_offset_high;
 980        unsigned long long pci_bound, vme_offset, pci_base;
 981
 982        /* Verify input data */
 983        if (vme_base & 0xFFFF) {
 984                printk("Invalid VME Window alignment\n");
 985                retval = -EINVAL;
 986                goto err_window;
 987        }
 988        if (size < 0x10000) {
 989                printk("Invalid VME Window size\n");
 990                retval = -EINVAL;
 991                goto err_window;
 992        }
 993
 994        spin_lock(&(image->lock));
 995
 996        /* Let's allocate the resource here rather than further up the stack as
  997         * it avoids pushing loads of bus dependent stuff up the stack
 998         */
 999        retval = tsi148_alloc_resource(image, size);
1000        if (retval) {
1001                spin_unlock(&(image->lock));
1002                printk(KERN_ERR "Unable to allocate memory for resource "
1003                        "name\n");
1004                retval = -ENOMEM;
1005                goto err_res;
1006        }
1007
1008        pci_base = (unsigned long long)image->pci_resource.start;
1009
1010
1011        /*
1012         * Bound address is a valid address for the window, adjust
1013         * according to window granularity.
1014         */
1015        pci_bound = pci_base + (size - 0x10000);
1016        vme_offset = vme_base - pci_base;
1017
1018        /* Convert 64-bit variables to 2x 32-bit variables */
1019        reg_split(pci_base, &pci_base_high, &pci_base_low);
1020        reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
1021        reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
1022
1023        if (pci_base_low & 0xFFFF) {
1024                spin_unlock(&(image->lock));
1025                printk("Invalid PCI base alignment\n");
1026                retval = -EINVAL;
1027                goto err_gran;
1028        }
1029        if (pci_bound_low & 0xFFFF) {
1030                spin_unlock(&(image->lock));
1031                printk("Invalid PCI bound alignment\n");
1032                retval = -EINVAL;
1033                goto err_gran;
1034        }
1035        if (vme_offset_low & 0xFFFF) {
1036                spin_unlock(&(image->lock));
1037                printk("Invalid VME Offset alignment\n");
1038                retval = -EINVAL;
1039                goto err_gran;
1040        }
1041
1042        i = image->number;
1043
1044        /* Disable while we are mucking around */
1045        temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1046                TSI148_LCSR_OFFSET_OTAT);
1047        temp_ctl &= ~TSI148_LCSR_OTAT_EN;
1048        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1049                TSI148_LCSR_OFFSET_OTAT);
1050
1051/* XXX Prefetch stuff currently unsupported */
1052#if 0
1053        if (vmeOut->prefetchEnable) {
1054                temp_ctl |= 0x40000;
1055                for (x = 0; x < 4; x++) {
1056                        if ((2 << x) >= vmeOut->prefetchSize)
1057                                break;
1058                }
1059                if (x == 4)
1060                        x = 3;
1061                temp_ctl |= (x << 16);
1062        }
1063#endif
1064
1065        /* Setup 2eSST speeds */
1066        temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1067        switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1068        case VME_2eSST160:
1069                temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1070                break;
1071        case VME_2eSST267:
1072                temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1073                break;
1074        case VME_2eSST320:
1075                temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1076                break;
1077        }
1078
1079        /* Setup cycle types */
1080        if (cycle & VME_BLT) {
1081                temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1082                temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1083        }
1084        if (cycle & VME_MBLT) {
1085                temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1086                temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1087        }
1088        if (cycle & VME_2eVME) {
1089                temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1090                temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1091        }
1092        if (cycle & VME_2eSST) {
1093                temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1094                temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1095        }
1096        if (cycle & VME_2eSSTB) {
1097                printk("Currently not setting Broadcast Select Registers\n");
1098                temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1099                temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1100        }
1101
1102        /* Setup data width */
1103        temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1104        switch (dwidth) {
1105        case VME_D16:
1106                temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1107                break;
1108        case VME_D32:
1109                temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1110                break;
1111        default:
1112                spin_unlock(&(image->lock));
1113                printk("Invalid data width\n");
1114                retval = -EINVAL;
1115                goto err_dwidth;
1116        }
1117
1118        /* Setup address space */
1119        temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1120        switch (aspace) {
1121        case VME_A16:
1122                temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1123                break;
1124        case VME_A24:
1125                temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1126                break;
1127        case VME_A32:
1128                temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1129                break;
1130        case VME_A64:
1131                temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1132                break;
1133        case VME_CRCSR:
1134                temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1135                break;
1136        case VME_USER1:
1137                temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1138                break;
1139        case VME_USER2:
1140                temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1141                break;
1142        case VME_USER3:
1143                temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1144                break;
1145        case VME_USER4:
1146                temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1147                break;
1148        default:
1149                spin_unlock(&(image->lock));
1150                printk("Invalid address space\n");
1151                retval = -EINVAL;
1152                goto err_aspace;
1153                break;
1154        }
1155
 1156        temp_ctl &= ~(3<<4);    /* Clear SUP and PGM bits */
1157        if (cycle & VME_SUPER)
1158                temp_ctl |= TSI148_LCSR_OTAT_SUP;
1159        if (cycle & VME_PROG)
1160                temp_ctl |= TSI148_LCSR_OTAT_PGM;
1161
1162        /* Setup mapping */
1163        iowrite32be(pci_base_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1164                TSI148_LCSR_OFFSET_OTSAU);
1165        iowrite32be(pci_base_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1166                TSI148_LCSR_OFFSET_OTSAL);
1167        iowrite32be(pci_bound_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1168                TSI148_LCSR_OFFSET_OTEAU);
1169        iowrite32be(pci_bound_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1170                TSI148_LCSR_OFFSET_OTEAL);
1171        iowrite32be(vme_offset_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1172                TSI148_LCSR_OFFSET_OTOFU);
1173        iowrite32be(vme_offset_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1174                TSI148_LCSR_OFFSET_OTOFL);
1175
1176/* XXX We need to deal with OTBS */
1177#if 0
1178        iowrite32be(vmeOut->bcastSelect2esst, tsi148_bridge->base +
1179                TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS);
1180#endif
1181
1182        /* Write ctl reg without enable */
1183        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1184                TSI148_LCSR_OFFSET_OTAT);
1185
1186        if (enabled)
1187                temp_ctl |= TSI148_LCSR_OTAT_EN;
1188
1189        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1190                TSI148_LCSR_OFFSET_OTAT);
1191
1192        spin_unlock(&(image->lock));
1193        return 0;
1194
1195err_aspace:
1196err_dwidth:
1197err_gran:
1198        tsi148_free_resource(image);
1199err_res:
1200err_window:
1201        return retval;
1202
1203}
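/*
 * Example (illustrative): outbound windows have 64KB (0x10000) granularity,
 * so vme_base and (in practice) size must be multiples of 0x10000; the end
 * address is programmed as pci_base + size - 0x10000 and the VME offset as
 * vme_base - pci_base.
 */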
1204
1205/*
 1206 * Get the attributes of an outbound window.
1207 *
1208 * XXX Not parsing prefetch information.
1209 */
1210int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1211        unsigned long long *vme_base, unsigned long long *size,
1212        vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1213{
1214        unsigned int i, ctl;
1215        unsigned int pci_base_low, pci_base_high;
1216        unsigned int pci_bound_low, pci_bound_high;
1217        unsigned int vme_offset_low, vme_offset_high;
1218
1219        unsigned long long pci_base, pci_bound, vme_offset;
1220
1221        i = image->number;
1222
1223        ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1224                TSI148_LCSR_OFFSET_OTAT);
1225
1226        pci_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1227                TSI148_LCSR_OFFSET_OTSAU);
1228        pci_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1229                TSI148_LCSR_OFFSET_OTSAL);
1230        pci_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1231                TSI148_LCSR_OFFSET_OTEAU);
1232        pci_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1233                TSI148_LCSR_OFFSET_OTEAL);
1234        vme_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1235                TSI148_LCSR_OFFSET_OTOFU);
1236        vme_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1237                TSI148_LCSR_OFFSET_OTOFL);
1238
1239        /* Convert 64-bit variables to 2x 32-bit variables */
1240        reg_join(pci_base_high, pci_base_low, &pci_base);
1241        reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1242        reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1243
1244        *vme_base = pci_base + vme_offset;
1245        *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1246
1247        *enabled = 0;
1248        *aspace = 0;
1249        *cycle = 0;
1250        *dwidth = 0;
1251
1252        if (ctl & TSI148_LCSR_OTAT_EN)
1253                *enabled = 1;
1254
1255        /* Setup address space */
1256        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1257                *aspace |= VME_A16;
1258        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1259                *aspace |= VME_A24;
1260        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1261                *aspace |= VME_A32;
1262        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1263                *aspace |= VME_A64;
1264        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1265                *aspace |= VME_CRCSR;
1266        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1267                *aspace |= VME_USER1;
1268        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1269                *aspace |= VME_USER2;
1270        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1271                *aspace |= VME_USER3;
1272        if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1273                *aspace |= VME_USER4;
1274
1275        /* Setup 2eSST speeds */
1276        if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1277                *cycle |= VME_2eSST160;
1278        if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1279                *cycle |= VME_2eSST267;
1280        if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1281                *cycle |= VME_2eSST320;
1282
1283        /* Setup cycle types */
 1284        if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
 1285                *cycle |= VME_SCT;
 1286        if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
 1287                *cycle |= VME_BLT;
 1288        if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
 1289                *cycle |= VME_MBLT;
 1290        if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
 1291                *cycle |= VME_2eVME;
 1292        if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
 1293                *cycle |= VME_2eSST;
 1294        if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
 1295                *cycle |= VME_2eSSTB;
1296
1297        if (ctl & TSI148_LCSR_OTAT_SUP)
1298                *cycle |= VME_SUPER;
1299        else
1300                *cycle |= VME_USER;
1301
1302        if (ctl & TSI148_LCSR_OTAT_PGM)
1303                *cycle |= VME_PROG;
1304        else
1305                *cycle |= VME_DATA;
1306
1307        /* Setup data width */
1308        if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1309                *dwidth = VME_D16;
1310        if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1311                *dwidth = VME_D32;
1312
1313        return 0;
1314}
1315
1316
1317int tsi148_master_get( struct vme_master_resource *image, int *enabled,
1318        unsigned long long *vme_base, unsigned long long *size,
1319        vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1320{
1321        int retval;
1322
1323        spin_lock(&(image->lock));
1324
1325        retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1326                cycle, dwidth);
1327
1328        spin_unlock(&(image->lock));
1329
1330        return retval;
1331}
1332
1333ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1334        size_t count, loff_t offset)
1335{
1336        int retval, enabled;
1337        unsigned long long vme_base, size;
1338        vme_address_t aspace;
1339        vme_cycle_t cycle;
1340        vme_width_t dwidth;
1341        struct vme_bus_error *vme_err = NULL;
1342
1343        spin_lock(&(image->lock));
1344
1345        memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1346        retval = count;
1347
1348        if (!err_chk)
1349                goto skip_chk;
1350
1351        __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1352                &dwidth);
1353
1354        vme_err = tsi148_find_error(aspace, vme_base + offset, count);
 1355        if (vme_err != NULL) {
 1356                dev_err(image->parent->parent, "First VME read error detected "
 1357                        "at address 0x%llx\n", vme_err->address);
 1358                retval = vme_err->address - (vme_base + offset);
 1359                /* Clear down saved errors in this address range */
1360                tsi148_clear_errors(aspace, vme_base + offset, count);
1361        }
1362
1363skip_chk:
1364        spin_unlock(&(image->lock));
1365
1366        return retval;
1367}
1368
1369
1370/* XXX We need to change vme_master_resource->mtx to a spinlock so that read
1371 *     and write functions can be used in an interrupt context
1372 */
1373ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1374        size_t count, loff_t offset)
1375{
1376        int retval = 0, enabled;
1377        unsigned long long vme_base, size;
1378        vme_address_t aspace;
1379        vme_cycle_t cycle;
1380        vme_width_t dwidth;
1381
1382        struct vme_bus_error *vme_err = NULL;
1383
1384        spin_lock(&(image->lock));
1385
1386        memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1387        retval = count;
1388
1389        /*
1390         * Writes are posted. We need to do a read on the VME bus to flush out
 1391         * all of the writes before we check for errors. We can't guarantee
1392         * that reading the data we have just written is safe. It is believed
1393         * that there isn't any read, write re-ordering, so we can read any
 1394         * location in VME space, so let's read the Device ID from the tsi148's
1395         * own registers as mapped into CR/CSR space.
1396         *
1397         * We check for saved errors in the written address range/space.
1398         */
1399
1400        if (!err_chk)
1401                goto skip_chk;
1402
1403        /*
1404         * Get window info first, to maximise the time that the buffers may
 1405         * flush on their own
1406         */
1407        __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1408                &dwidth);
1409
1410        ioread16(flush_image->kern_base + 0x7F000);
1411
1412        vme_err = tsi148_find_error(aspace, vme_base + offset, count);
 1413        if (vme_err != NULL) {
 1414                printk(KERN_ERR "First VME write error detected at address 0x%llx\n",
 1415                        vme_err->address);
 1416                retval = vme_err->address - (vme_base + offset);
 1417                /* Clear down saved errors in this address range */
1418                tsi148_clear_errors(aspace, vme_base + offset, count);
1419        }
1420
1421skip_chk:
1422        spin_unlock(&(image->lock));
1423
1424        return retval;
1425}
1426
1427/*
1428 * Perform an RMW cycle on the VME bus.
1429 *
1430 * Requires a previously configured master window, returns final value.
1431 */
1432unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1433        unsigned int mask, unsigned int compare, unsigned int swap,
1434        loff_t offset)
1435{
1436        unsigned long long pci_addr;
1437        unsigned int pci_addr_high, pci_addr_low;
1438        u32 tmp, result;
1439        int i;
1440
1441
1442        /* Find the PCI address that maps to the desired VME address */
1443        i = image->number;
1444
1445        /* Locking as we can only do one of these at a time */
1446        mutex_lock(&(vme_rmw));
1447
1448        /* Lock image */
1449        spin_lock(&(image->lock));
1450
1451        pci_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1452                TSI148_LCSR_OFFSET_OTSAU);
1453        pci_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1454                TSI148_LCSR_OFFSET_OTSAL);
1455
1456        reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1457        reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1458
1459        /* Configure registers */
1460        iowrite32be(mask, tsi148_bridge->base + TSI148_LCSR_RMWEN);
1461        iowrite32be(compare, tsi148_bridge->base + TSI148_LCSR_RMWC);
1462        iowrite32be(swap, tsi148_bridge->base + TSI148_LCSR_RMWS);
1463        iowrite32be(pci_addr_high, tsi148_bridge->base + TSI148_LCSR_RMWAU);
1464        iowrite32be(pci_addr_low, tsi148_bridge->base + TSI148_LCSR_RMWAL);
1465
1466        /* Enable RMW */
1467        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1468        tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1469        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1470
1471        /* Kick process off with a read to the required address. */
1472        result = ioread32be(image->kern_base + offset);
1473
1474        /* Disable RMW */
1475        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1476        tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1477        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1478
1479        spin_unlock(&(image->lock));
1480
1481        mutex_unlock(&(vme_rmw));
1482
1483        return result;
1484}
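/*
 * Illustrative flow: mask, compare and swap are loaded into RMWEN, RMWC and
 * RMWS, the translated PCI address into RMWAU/RMWAL, RMW mode is enabled in
 * VMCTRL and the read through the outbound window triggers the locked
 * read-modify-write cycle; the value read back is returned.
 */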
1485
1486static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1487        vme_cycle_t cycle, vme_width_t dwidth)
1488{
1489        /* Setup 2eSST speeds */
1490        switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1491        case VME_2eSST160:
1492                *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1493                break;
1494        case VME_2eSST267:
1495                *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1496                break;
1497        case VME_2eSST320:
1498                *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1499                break;
1500        }
1501
1502        /* Setup cycle types */
1503        if (cycle & VME_SCT) {
1504                *attr |= TSI148_LCSR_DSAT_TM_SCT;
1505        }
1506        if (cycle & VME_BLT) {
1507                *attr |= TSI148_LCSR_DSAT_TM_BLT;
1508        }
1509        if (cycle & VME_MBLT) {
1510                *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1511        }
1512        if (cycle & VME_2eVME) {
1513                *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1514        }
1515        if (cycle & VME_2eSST) {
1516                *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1517        }
1518        if (cycle & VME_2eSSTB) {
1519                printk(KERN_WARNING "Currently not setting Broadcast Select Registers\n");
1520                *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1521        }
1522
1523        /* Setup data width */
1524        switch (dwidth) {
1525        case VME_D16:
1526                *attr |= TSI148_LCSR_DSAT_DBW_16;
1527                break;
1528        case VME_D32:
1529                *attr |= TSI148_LCSR_DSAT_DBW_32;
1530                break;
1531        default:
1532                printk(KERN_ERR "Invalid data width\n");
1533                return -EINVAL;
1534        }
1535
1536        /* Setup address space */
1537        switch (aspace) {
1538        case VME_A16:
1539                *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1540                break;
1541        case VME_A24:
1542                *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1543                break;
1544        case VME_A32:
1545                *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1546                break;
1547        case VME_A64:
1548                *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1549                break;
1550        case VME_CRCSR:
1551                *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1552                break;
1553        case VME_USER1:
1554                *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1555                break;
1556        case VME_USER2:
1557                *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1558                break;
1559        case VME_USER3:
1560                *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1561                break;
1562        case VME_USER4:
1563                *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1564                break;
1565        default:
1566                printk(KERN_ERR "Invalid address space\n");
1567                return -EINVAL;
1569        }
1570
1571        if (cycle & VME_SUPER)
1572                *attr |= TSI148_LCSR_DSAT_SUP;
1573        if (cycle & VME_PROG)
1574                *attr |= TSI148_LCSR_DSAT_PGM;
1575
1576        return 0;
1577}
1578
1579static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1580        vme_cycle_t cycle, vme_width_t dwidth)
1581{
1582        /* Setup 2eSST speeds */
1583        switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1584        case VME_2eSST160:
1585                *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1586                break;
1587        case VME_2eSST267:
1588                *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1589                break;
1590        case VME_2eSST320:
1591                *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1592                break;
1593        }
1594
1595        /* Setup cycle types */
1596        if (cycle & VME_SCT) {
1597                *attr |= TSI148_LCSR_DDAT_TM_SCT;
1598        }
1599        if (cycle & VME_BLT) {
1600                *attr |= TSI148_LCSR_DDAT_TM_BLT;
1601        }
1602        if (cycle & VME_MBLT) {
1603                *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1604        }
1605        if (cycle & VME_2eVME) {
1606                *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1607        }
1608        if (cycle & VME_2eSST) {
1609                *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1610        }
1611        if (cycle & VME_2eSSTB) {
1612                printk(KERN_WARNING "Currently not setting Broadcast Select Registers\n");
1613                *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1614        }
1615
1616        /* Setup data width */
1617        switch (dwidth) {
1618        case VME_D16:
1619                *attr |= TSI148_LCSR_DDAT_DBW_16;
1620                break;
1621        case VME_D32:
1622                *attr |= TSI148_LCSR_DDAT_DBW_32;
1623                break;
1624        default:
1625                printk(KERN_ERR "Invalid data width\n");
1626                return -EINVAL;
1627        }
1628
1629        /* Setup address space */
1630        switch (aspace) {
1631        case VME_A16:
1632                *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1633                break;
1634        case VME_A24:
1635                *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1636                break;
1637        case VME_A32:
1638                *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1639                break;
1640        case VME_A64:
1641                *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1642                break;
1643        case VME_CRCSR:
1644                *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1645                break;
1646        case VME_USER1:
1647                *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1648                break;
1649        case VME_USER2:
1650                *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1651                break;
1652        case VME_USER3:
1653                *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1654                break;
1655        case VME_USER4:
1656                *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1657                break;
1658        default:
1659                printk(KERN_ERR "Invalid address space\n");
1660                return -EINVAL;
1662        }
1663
1664        if (cycle & VME_SUPER)
1665                *attr |= TSI148_LCSR_DDAT_SUP;
1666        if (cycle & VME_PROG)
1667                *attr |= TSI148_LCSR_DDAT_PGM;
1668
1669        return 0;
1670}
1671
1672/*
1673 * Add a link list descriptor to the list
1674 *
1675 * XXX Need to handle 2eSST Broadcast select bits
1676 */
1677int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1678        struct vme_dma_attr *dest, size_t count)
1679{
1680        struct tsi148_dma_entry *entry, *prev;
1681        u32 address_high, address_low;
1682        struct vme_dma_pattern *pattern_attr;
1683        struct vme_dma_pci *pci_attr;
1684        struct vme_dma_vme *vme_attr;
1685        dma_addr_t desc_ptr;
1686        int retval = 0;
1687
1688        /* XXX descriptor must be aligned on 64-bit boundaries */
1689        entry = (struct tsi148_dma_entry *)kmalloc(
1690                sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1691        if (entry == NULL) {
1692                printk(KERN_ERR "Failed to allocate memory for dma resource "
1693                        "structure\n");
1694                retval = -ENOMEM;
1695                goto err_mem;
1696        }
1697
1698        /* Test descriptor alignment */
1699        if ((unsigned long)&(entry->descriptor) & 0x7) {
1700                printk(KERN_ERR "Descriptor not aligned to 8 byte boundary as "
1701                        "required: %p\n", &(entry->descriptor));
1702                retval = -EINVAL;
1703                goto err_align;
1704        }
1705
1706        /* Given we are going to fill out the structure, we probably don't
1707         * need to zero it, but better safe than sorry for now.
1708         */
1709        memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1710
1711        /* Fill out source part */
1712        switch (src->type) {
1713        case VME_DMA_PATTERN:
1714                pattern_attr = (struct vme_dma_pattern *)src->private;
1715
1716                entry->descriptor.dsal = pattern_attr->pattern;
1717                entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1718                /* Default behaviour is 32 bit pattern */
1719                if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1720                        entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1721                }
1722                /* It seems that the default behaviour is to increment */
1723                if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1724                        entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1725                }
1726                break;
1727        case VME_DMA_PCI:
1728                pci_attr = (struct vme_dma_pci *)src->private;
1729
1730                reg_split((unsigned long long)pci_attr->address, &address_high,
1731                        &address_low);
1732                entry->descriptor.dsau = address_high;
1733                entry->descriptor.dsal = address_low;
1734                entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1735                break;
1736        case VME_DMA_VME:
1737                vme_attr = (struct vme_dma_vme *)src->private;
1738
1739                reg_split((unsigned long long)vme_attr->address, &address_high,
1740                        &address_low);
1741                entry->descriptor.dsau = address_high;
1742                entry->descriptor.dsal = address_low;
1743                entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1744
1745                retval = tsi148_dma_set_vme_src_attributes(
1746                        &(entry->descriptor.dsat), vme_attr->aspace,
1747                        vme_attr->cycle, vme_attr->dwidth);
1748                if (retval < 0)
1749                        goto err_source;
1750                break;
1751        default:
1752                printk(KERN_ERR "Invalid source type\n");
1753                retval = -EINVAL;
1754                goto err_source;
1756        }
1757
1758        /* Assume last link - this will be over-written by adding another */
1759        entry->descriptor.dnlau = 0;
1760        entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1761
1762
1763        /* Fill out destination part */
1764        switch (dest->type) {
1765        case VME_DMA_PCI:
1766                pci_attr = (struct vme_dma_pci *)dest->private;
1767
1768                reg_split((unsigned long long)pci_attr->address, &address_high,
1769                        &address_low);
1770                entry->descriptor.ddau = address_high;
1771                entry->descriptor.ddal = address_low;
1772                entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1773                break;
1774        case VME_DMA_VME:
1775                vme_attr = (struct vme_dma_vme *)dest->private;
1776
1777                reg_split((unsigned long long)vme_attr->address, &address_high,
1778                        &address_low);
1779                entry->descriptor.ddau = address_high;
1780                entry->descriptor.ddal = address_low;
1781                entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1782
1783                retval = tsi148_dma_set_vme_dest_attributes(
1784                        &(entry->descriptor.ddat), vme_attr->aspace,
1785                        vme_attr->cycle, vme_attr->dwidth);
1786                if (retval < 0)
1787                        goto err_dest;
1788                break;
1789        default:
1790                printk(KERN_ERR "Invalid destination type\n");
1791                retval = -EINVAL;
1792                goto err_dest;
1794        }
1795
1796        /* Fill out count */
1797        entry->descriptor.dcnt = (u32)count;
1798
1799        /* Add to list */
1800        list_add_tail(&(entry->list), &(list->entries));
1801
1802        /* Fill out previous descriptors "Next Address" */
1803        if (entry->list.prev != &(list->entries)) {
1804                prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1805                        list);
1806                /* We need the bus address for the pointer */
1807                desc_ptr = virt_to_bus(&(entry->descriptor));
1808                reg_split(desc_ptr, &(prev->descriptor.dnlau),
1809                        &(prev->descriptor.dnlal));
1810        }
1811
1812        return 0;
1813
1814err_dest:
1815err_source:
1816err_align:
1817        kfree(entry);
1818err_mem:
1819        return retval;
1820}
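
/*
 * Usage sketch (illustrative only, kept out of the build): queue a single
 * PCI -> VME transfer on a DMA list. The attribute structures are filled in
 * directly here purely for illustration; in normal use they are provided by
 * the VME core. The helper name and parameters are hypothetical.
 */
#if 0
static int example_queue_transfer(struct vme_dma_list *list,
        dma_addr_t pci_buf, unsigned long long vme_addr, size_t count)
{
        struct vme_dma_pci pci = { .address = pci_buf };
        struct vme_dma_vme vme = {
                .address = vme_addr,
                .aspace = VME_A32,
                .cycle = VME_SCT,
                .dwidth = VME_D32,
        };
        struct vme_dma_attr src = { .type = VME_DMA_PCI, .private = &pci };
        struct vme_dma_attr dest = { .type = VME_DMA_VME, .private = &vme };

        /* Builds one aligned descriptor and chains it onto list->entries */
        return tsi148_dma_list_add(list, &src, &dest, count);
}
#endif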
1821
1822/*
1823 * Check whether the DMA channel is busy: returns 0 while busy, 1 when idle.
1824 */
1825static int tsi148_dma_busy(int channel)
1826{
1827        u32 tmp;
1828
1829        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1830                TSI148_LCSR_OFFSET_DSTA);
1831
1832        if (tmp & TSI148_LCSR_DSTA_BSY)
1833                return 0;
1834        else
1835                return 1;
1836
1837}
1838
1839/*
1840 * Execute a previously generated link list
1841 *
1842 * XXX Need to provide control register configuration.
1843 */
1844int tsi148_dma_list_exec(struct vme_dma_list *list)
1845{
1846        struct vme_dma_resource *ctrlr;
1847        int channel, retval = 0;
1848        struct tsi148_dma_entry *entry;
1849        dma_addr_t bus_addr;
1850        u32 bus_addr_high, bus_addr_low;
1851        u32 val, dctlreg = 0;
1852#if 0
1853        int x;
1854#endif
1855
1856        ctrlr = list->parent;
1857
1858        mutex_lock(&(ctrlr->mtx));
1859
1860        channel = ctrlr->number;
1861
1862        if (!list_empty(&(ctrlr->running))) {
1863                /*
1864                 * XXX We have an active DMA transfer and currently haven't
1865                 *     sorted out the mechanism for "pending" DMA transfers.
1866                 *     Return busy.
1867                 */
1868                /* Need to add to pending here */
1869                mutex_unlock(&(ctrlr->mtx));
1870                return -EBUSY;
1871        } else {
1872                list_add(&(list->list), &(ctrlr->running));
1873        }
1874#if 0
1875        /* XXX Still todo */
1876        for (x = 0; x < 8; x++) {       /* vme block size */
1877                if ((32 << x) >= vmeDma->maxVmeBlockSize) {
1878                        break;
1879                }
1880        }
1881        if (x == 8)
1882                x = 7;
1883        dctlreg |= (x << 12);
1884
1885        for (x = 0; x < 8; x++) {       /* pci block size */
1886                if ((32 << x) >= vmeDma->maxPciBlockSize) {
1887                        break;
1888                }
1889        }
1890        if (x == 8)
1891                x = 7;
1892        dctlreg |= (x << 4);
1893
1894        if (vmeDma->vmeBackOffTimer) {
1895                for (x = 1; x < 8; x++) {       /* vme timer */
1896                        if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1897                                break;
1898                        }
1899                }
1900                if (x == 8)
1901                        x = 7;
1902                dctlreg |= (x << 8);
1903        }
1904
1905        if (vmeDma->pciBackOffTimer) {
1906                for (x = 1; x < 8; x++) {       /* pci timer */
1907                        if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
1908                                break;
1909                        }
1910                }
1911                if (x == 8)
1912                        x = 7;
1913                dctlreg |= (x << 0);
1914        }
1915#endif
1916
1917        /* Get first bus address and write into registers */
1918        entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1919                list);
1920
1921        bus_addr = virt_to_bus(&(entry->descriptor));
1922
1923        mutex_unlock(&(ctrlr->mtx));
1924
1925        reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1926
1927        iowrite32be(bus_addr_high, tsi148_bridge->base +
1928                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1929        iowrite32be(bus_addr_low, tsi148_bridge->base +
1930                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1931
1932        /* Start the operation */
1933        iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, tsi148_bridge->base +
1934                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1935
1936        wait_event_interruptible(dma_queue[channel], tsi148_dma_busy(channel));
1937        /*
1938         * Read status register, this register is valid until we kick off a
1939         * new transfer.
1940         */
1941        val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1942                TSI148_LCSR_OFFSET_DSTA);
1943
1944        if (val & TSI148_LCSR_DSTA_VBE) {
1945                printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1946                retval = -EIO;
1947        }
1948
1949        /* Remove list from running list */
1950        mutex_lock(&(ctrlr->mtx));
1951        list_del(&(list->list));
1952        mutex_unlock(&(ctrlr->mtx));
1953
1954        return retval;
1955}
1956
1957/*
1958 * Clean up a previously generated link list
1959 *
1960 * This is a separate function so that a list can be emptied and then reused.
1961 */
1962int tsi148_dma_list_empty(struct vme_dma_list *list)
1963{
1964        struct list_head *pos, *temp;
1965        struct tsi148_dma_entry *entry;
1966
1967        /* detach and free each entry */
1968        list_for_each_safe(pos, temp, &(list->entries)) {
1969                list_del(pos);
1970                entry = list_entry(pos, struct tsi148_dma_entry, list);
1971                kfree(entry);
1972        }
1973
1974        return 0;
1975}
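
/*
 * Usage sketch (illustrative only, kept out of the build): run a previously
 * built list and release its descriptors. tsi148_dma_list_exec() sleeps on
 * dma_queue[] until tsi148_dma_busy() reports the channel idle, so this is
 * only safe from process context. The helper name is hypothetical.
 */
#if 0
static int example_run_list(struct vme_dma_list *list)
{
        int retval;

        retval = tsi148_dma_list_exec(list);   /* blocks until complete */
        tsi148_dma_list_empty(list);           /* free the descriptor chain */

        return retval;
}
#endif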
1976
1977/*
1978 * All 4 location monitors reside at the same base - this is therefore a
1979 * system wide configuration.
1980 *
1981 * This does not enable the LM monitor - that should be done when the first
1982 * callback is attached and disabled when the last callback is removed.
1983 */
1984int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1985        vme_address_t aspace, vme_cycle_t cycle)
1986{
1987        u32 lm_base_high, lm_base_low, lm_ctl = 0;
1988        int i;
1989
1990        mutex_lock(&(lm->mtx));
1991
1992        /* If we already have a callback attached, we can't move it! */
1993        for (i = 0; i < lm->monitors; i++) {
1994                if (lm_callback[i] != NULL) {
1995                        mutex_unlock(&(lm->mtx));
1996                        printk(KERN_ERR "Location monitor callback attached, "
1997                                "can't reset\n");
1998                        return -EBUSY;
1999                }
2000        }
2001
2002        switch (aspace) {
2003        case VME_A16:
2004                lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2005                break;
2006        case VME_A24:
2007                lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2008                break;
2009        case VME_A32:
2010                lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2011                break;
2012        case VME_A64:
2013                lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2014                break;
2015        default:
2016                mutex_unlock(&(lm->mtx));
2017                printk(KERN_ERR "Invalid address space\n");
2018                return -EINVAL;
2020        }
2021
2022        if (cycle & VME_SUPER)
2023                lm_ctl |= TSI148_LCSR_LMAT_SUPR;
2024        if (cycle & VME_USER)
2025                lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2026        if (cycle & VME_PROG)
2027                lm_ctl |= TSI148_LCSR_LMAT_PGM;
2028        if (cycle & VME_DATA)
2029                lm_ctl |= TSI148_LCSR_LMAT_DATA;
2030
2031        reg_split(lm_base, &lm_base_high, &lm_base_low);
2032
2033        iowrite32be(lm_base_high, tsi148_bridge->base + TSI148_LCSR_LMBAU);
2034        iowrite32be(lm_base_low, tsi148_bridge->base + TSI148_LCSR_LMBAL);
2035        iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2036
2037        mutex_unlock(&(lm->mtx));
2038
2039        return 0;
2040}
2041
2042/* Get the configuration of the location monitor and return whether it is
2043 * enabled or disabled.
2044 */
2045int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
2046        vme_address_t *aspace, vme_cycle_t *cycle)
2047{
2048        u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2049
2050        mutex_lock(&(lm->mtx));
2051
2052        lm_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAU);
2053        lm_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAL);
2054        lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2055
2056        reg_join(lm_base_high, lm_base_low, lm_base);
2057
2058        if (lm_ctl & TSI148_LCSR_LMAT_EN)
2059                enabled = 1;
2060
2061        if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
2062                *aspace |= VME_A16;
2063        }
2064        if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
2065                *aspace |= VME_A24;
2066        }
2067        if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
2068                *aspace |= VME_A32;
2069        }
2070        if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
2071                *aspace |= VME_A64;
2072        }
2073
2074        if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2075                *cycle |= VME_SUPER;
2076        if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2077                *cycle |= VME_USER;
2078        if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2079                *cycle |= VME_PROG;
2080        if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2081                *cycle |= VME_DATA;
2082
2083        mutex_unlock(&(lm->mtx));
2084
2085        return enabled;
2086}
2087
2088/*
2089 * Attach a callback to a specific location monitor.
2090 *
2091 * Callback will be passed the monitor triggered.
2092 */
2093int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2094        void (*callback)(int))
2095{
2096        u32 lm_ctl, tmp;
2097
2098        mutex_lock(&(lm->mtx));
2099
2100        /* Ensure that the location monitor is configured - need PGM or DATA */
2101        lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2102        if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2103                mutex_unlock(&(lm->mtx));
2104                printk(KERN_ERR "Location monitor not properly configured\n");
2105                return -EINVAL;
2106        }
2107
2108        /* Check that a callback isn't already attached */
2109        if (lm_callback[monitor] != NULL) {
2110                mutex_unlock(&(lm->mtx));
2111                printk(KERN_ERR "Existing callback attached\n");
2112                return -EBUSY;
2113        }
2114
2115        /* Attach callback */
2116        lm_callback[monitor] = callback;
2117
2118        /* Enable Location Monitor interrupt */
2119        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2120        tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2121        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
2122
2123        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2124        tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2125        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2126
2127        /* Ensure that the global Location Monitor Enable bit is set */
2128        if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2129                lm_ctl |= TSI148_LCSR_LMAT_EN;
2130                iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2131        }
2132
2133        mutex_unlock(&(lm->mtx));
2134
2135        return 0;
2136}
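
/*
 * Usage sketch (illustrative only, kept out of the build): configure the
 * location monitors over a data area and attach a callback to monitor 0.
 * "lm" is assumed to be the vme_lm_resource registered by tsi148_probe()
 * below; the helper and callback names are hypothetical.
 */
#if 0
static void example_lm_hit(int monitor)
{
        /* Runs from the interrupt handler - keep it short */
        printk(KERN_INFO "location monitor %d triggered\n", monitor);
}

static int example_lm_setup(struct vme_lm_resource *lm,
        unsigned long long base)
{
        int retval;

        /* Non-privileged data accesses in A32 space; VME_DATA satisfies the
         * PGM/DATA check in tsi148_lm_attach() */
        retval = tsi148_lm_set(lm, base, VME_A32, VME_USER | VME_DATA);
        if (retval)
                return retval;

        return tsi148_lm_attach(lm, 0, example_lm_hit);
}
#endif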
2137
2138/*
2139 * Detach a callback function from a specific location monitor.
2140 */
2141int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2142{
2143        u32 lm_en, tmp;
2144
2145        mutex_lock(&(lm->mtx));
2146
2147        /* Disable Location Monitor and ensure previous interrupts are clear */
2148        lm_en = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2149        lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2150        iowrite32be(lm_en, tsi148_bridge->base + TSI148_LCSR_INTEN);
2151
2152        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2153        tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2154        iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2155
2156        iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2157                 tsi148_bridge->base + TSI148_LCSR_INTC);
2158
2159        /* Detach callback */
2160        lm_callback[monitor] = NULL;
2161
2162        /* If all location monitors disabled, disable global Location Monitor */
2163        if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2164                        TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2165                tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2166                tmp &= ~TSI148_LCSR_LMAT_EN;
2167                iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_LMAT);
2168        }
2169
2170        mutex_unlock(&(lm->mtx));
2171
2172        return 0;
2173}
2174
2175/*
2176 * Determine Geographical Addressing
2177 */
2178int tsi148_slot_get(void)
2179{
2180        u32 slot = 0;
2181
2182        slot = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2183        slot = slot & TSI148_LCSR_VSTAT_GA_M;
2184        return (int)slot;
2185}
2186
2187static int __init tsi148_init(void)
2188{
2189        return pci_register_driver(&tsi148_driver);
2190}
2191
2192/*
2193 * Configure CR/CSR space
2194 *
2195 * Access to the CR/CSR can be configured at power-up. The location of the
2196 * CR/CSR registers in the CR/CSR address space is determined by the board's
2197 * Auto-ID or Geographic address. This function ensures that the window is
2198 * enabled at an offset consistent with the board's geographic address.
2199 *
2200 * Each board has a 512kB window, with the highest 4kB being used for the
2201 * board's registers; this means there is a fixed-length 508kB window which
2202 * must be mapped onto PCI memory.
2203 */
2204static int tsi148_crcsr_init(struct pci_dev *pdev)
2205{
2206        u32 cbar, crat, vstat;
2207        u32 crcsr_bus_high, crcsr_bus_low;
2208        int retval;
2209
2210        /* Allocate mem for CR/CSR image */
2211        crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2212                &crcsr_bus);
2213        if (crcsr_kernel == NULL) {
2214                dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2215                        "image\n");
2216                return -ENOMEM;
2217        }
2218
2219        memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2220
2221        reg_split(crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2222
2223        iowrite32be(crcsr_bus_high, tsi148_bridge->base + TSI148_LCSR_CROU);
2224        iowrite32be(crcsr_bus_low, tsi148_bridge->base + TSI148_LCSR_CROL);
2225
2226        /* Ensure that the CR/CSR is configured at the correct offset */
2227        cbar = ioread32be(tsi148_bridge->base + TSI148_CBAR);
2228        cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2229
2230        vstat = tsi148_slot_get();
2231
2232        if (cbar != vstat) {
2233                dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2234                iowrite32be(vstat << 3, tsi148_bridge->base + TSI148_CBAR);
2235        }
2236        dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2237
2238        crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2239        if (crat & TSI148_LCSR_CRAT_EN) {
2240                dev_info(&pdev->dev, "CR/CSR already enabled\n");
2241        } else {
2242                dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2243                iowrite32be(crat | TSI148_LCSR_CRAT_EN, tsi148_bridge->base + TSI148_LCSR_CRAT);
2244        }
2245
2246        /* If we want flushed, error-checked writes, set up a window
2247         * over the CR/CSR registers. We read from here to safely flush
2248         * through VME writes.
2249         */
2250        if (err_chk) {
2251                retval = tsi148_master_set(flush_image, 1, (vstat * 0x80000),
2252                        0x80000, VME_CRCSR, VME_SCT, VME_D16);
2253                if (retval)
2254                        dev_err(&pdev->dev, "Configuring flush image failed\n");
2255        }
2256
2257        return 0;
2258
2259}
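
/*
 * Illustrative sketch (kept out of the build) of the CR/CSR layout assumed by
 * tsi148_crcsr_init() above: each slot owns a 512kB region and the board's
 * own registers occupy the top 4kB of that region, which is why the flush
 * read in tsi148_master_write() targets offset 0x7F000 of the flush window.
 */
#if 0
static void example_crcsr_layout(void)
{
        int slot = tsi148_slot_get();
        unsigned long long region = (unsigned long long)slot * 0x80000;

        printk(KERN_INFO "CR/CSR region 0x%llx - 0x%llx, registers at 0x%llx\n",
                region, region + 0x7FFFF, region + 0x7F000);
}
#endif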
2260
2261static void tsi148_crcsr_exit(struct pci_dev *pdev)
2262{
2263        u32 crat;
2264
2265        /* Turn off CR/CSR space */
2266        crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2267        iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2268                tsi148_bridge->base + TSI148_LCSR_CRAT);
2269
2270        /* Free image */
2271        iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROU);
2272        iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROL);
2273
2274        pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
2275}
2276
2277static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2278{
2279        int retval, i, master_num;
2280        u32 data;
2281        struct list_head *pos = NULL;
2282        struct vme_master_resource *master_image;
2283        struct vme_slave_resource *slave_image;
2284        struct vme_dma_resource *dma_ctrlr;
2285        struct vme_lm_resource *lm;
2286
2287        /* If we want to support more than one of each bridge, we need to
2288         * dynamically generate this so we get one per device
2289         */
2290        tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2291                GFP_KERNEL);
2292        if (tsi148_bridge == NULL) {
2293                dev_err(&pdev->dev, "Failed to allocate memory for device "
2294                        "structure\n");
2295                retval = -ENOMEM;
2296                goto err_struct;
2297        }
2298
2299        memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2300
2301        /* Enable the device */
2302        retval = pci_enable_device(pdev);
2303        if (retval) {
2304                dev_err(&pdev->dev, "Unable to enable device\n");
2305                goto err_enable;
2306        }
2307
2308        /* Map Registers */
2309        retval = pci_request_regions(pdev, driver_name);
2310        if (retval) {
2311                dev_err(&pdev->dev, "Unable to reserve resources\n");
2312                goto err_resource;
2313        }
2314
2315        /* map registers in BAR 0 */
2316        tsi148_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096);
2317        if (!tsi148_bridge->base) {
2318                dev_err(&pdev->dev, "Unable to remap CRG region\n");
2319                retval = -EIO;
2320                goto err_remap;
2321        }
2322
2323        /* Check to see if the mapping worked out */
2324        data = ioread32(tsi148_bridge->base + TSI148_PCFS_ID) & 0x0000FFFF;
2325        if (data != PCI_VENDOR_ID_TUNDRA) {
2326                dev_err(&pdev->dev, "CRG region check failed\n");
2327                retval = -EIO;
2328                goto err_test;
2329        }
2330
2331        /* Initialize wait queues & mutual exclusion flags */
2332        /* XXX These need to be moved to the vme_bridge structure */
2333        init_waitqueue_head(&dma_queue[0]);
2334        init_waitqueue_head(&dma_queue[1]);
2335        init_waitqueue_head(&iack_queue);
2336        mutex_init(&(vme_int));
2337        mutex_init(&(vme_irq));
2338        mutex_init(&(vme_rmw));
2339
2340        tsi148_bridge->parent = &(pdev->dev);
2341        strcpy(tsi148_bridge->name, driver_name);
2342
2343        /* Setup IRQ */
2344        retval = tsi148_irq_init(tsi148_bridge);
2345        if (retval != 0) {
2346                dev_err(&pdev->dev, "Chip Initialization failed.\n");
2347                goto err_irq;
2348        }
2349
2350        /* If we are going to flush writes, we need to read from the VME bus.
2351         * We need to do this safely, thus we read the devices own CR/CSR
2352         * register. To do this we must set up a window in CR/CSR space and
2353         * hence have one less master window resource available.
2354         */
2355        master_num = TSI148_MAX_MASTER;
2356        if (err_chk) {
2357                master_num--;
2358                /* XXX */
2359                flush_image = (struct vme_master_resource *)kmalloc(
2360                        sizeof(struct vme_master_resource), GFP_KERNEL);
2361                if (flush_image == NULL) {
2362                        dev_err(&pdev->dev, "Failed to allocate memory for "
2363                        "flush resource structure\n");
2364                        retval = -ENOMEM;
2365                        goto err_master;
2366                }
2367                flush_image->parent = tsi148_bridge;
2368                spin_lock_init(&(flush_image->lock));
2369                flush_image->locked = 1;
2370                flush_image->number = master_num;
2371                flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2372                        VME_A64;
2373                flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2374                        VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2375                        VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2376                        VME_PROG | VME_DATA;
2377                flush_image->width_attr = VME_D16 | VME_D32;
2378                memset(&(flush_image->pci_resource), 0,
2379                        sizeof(struct resource));
2380                flush_image->kern_base  = NULL;
2381        }
2382
2383        /* Add master windows to list */
2384        INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2385        for (i = 0; i < master_num; i++) {
2386                master_image = (struct vme_master_resource *)kmalloc(
2387                        sizeof(struct vme_master_resource), GFP_KERNEL);
2388                if (master_image == NULL) {
2389                        dev_err(&pdev->dev, "Failed to allocate memory for "
2390                        "master resource structure\n");
2391                        retval = -ENOMEM;
2392                        goto err_master;
2393                }
2394                master_image->parent = tsi148_bridge;
2395                spin_lock_init(&(master_image->lock));
2396                master_image->locked = 0;
2397                master_image->number = i;
2398                master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2399                        VME_A64;
2400                master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2401                        VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2402                        VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2403                        VME_PROG | VME_DATA;
2404                master_image->width_attr = VME_D16 | VME_D32;
2405                memset(&(master_image->pci_resource), 0,
2406                        sizeof(struct resource));
2407                master_image->kern_base  = NULL;
2408                list_add_tail(&(master_image->list),
2409                        &(tsi148_bridge->master_resources));
2410        }
2411
2412        /* Add slave windows to list */
2413        INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2414        for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2415                slave_image = (struct vme_slave_resource *)kmalloc(
2416                        sizeof(struct vme_slave_resource), GFP_KERNEL);
2417                if (slave_image == NULL) {
2418                        dev_err(&pdev->dev, "Failed to allocate memory for "
2419                        "slave resource structure\n");
2420                        retval = -ENOMEM;
2421                        goto err_slave;
2422                }
2423                slave_image->parent = tsi148_bridge;
2424                mutex_init(&(slave_image->mtx));
2425                slave_image->locked = 0;
2426                slave_image->number = i;
2427                slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2428                        VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2429                        VME_USER3 | VME_USER4;
2430                slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2431                        VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2432                        VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2433                        VME_PROG | VME_DATA;
2434                list_add_tail(&(slave_image->list),
2435                        &(tsi148_bridge->slave_resources));
2436        }
2437
2438        /* Add dma engines to list */
2439        INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2440        for (i = 0; i < TSI148_MAX_DMA; i++) {
2441                dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2442                        sizeof(struct vme_dma_resource), GFP_KERNEL);
2443                if (dma_ctrlr == NULL) {
2444                        dev_err(&pdev->dev, "Failed to allocate memory for "
2445                        "dma resource structure\n");
2446                        retval = -ENOMEM;
2447                        goto err_dma;
2448                }
2449                dma_ctrlr->parent = tsi148_bridge;
2450                mutex_init(&(dma_ctrlr->mtx));
2451                dma_ctrlr->locked = 0;
2452                dma_ctrlr->number = i;
2453                INIT_LIST_HEAD(&(dma_ctrlr->pending));
2454                INIT_LIST_HEAD(&(dma_ctrlr->running));
2455                list_add_tail(&(dma_ctrlr->list),
2456                        &(tsi148_bridge->dma_resources));
2457        }
2458
2459        /* Add location monitor to list */
2460        INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
2461        lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2462        if (lm == NULL) {
2463                dev_err(&pdev->dev, "Failed to allocate memory for "
2464                "location monitor resource structure\n");
2465                retval = -ENOMEM;
2466                goto err_lm;
2467        }
2468        lm->parent = tsi148_bridge;
2469        mutex_init(&(lm->mtx));
2470        lm->locked = 0;
2471        lm->number = 1;
2472        lm->monitors = 4;
2473        list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
2474
2475        tsi148_bridge->slave_get = tsi148_slave_get;
2476        tsi148_bridge->slave_set = tsi148_slave_set;
2477        tsi148_bridge->master_get = tsi148_master_get;
2478        tsi148_bridge->master_set = tsi148_master_set;
2479        tsi148_bridge->master_read = tsi148_master_read;
2480        tsi148_bridge->master_write = tsi148_master_write;
2481        tsi148_bridge->master_rmw = tsi148_master_rmw;
2482        tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2483        tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2484        tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2485        tsi148_bridge->request_irq = tsi148_request_irq;
2486        tsi148_bridge->free_irq = tsi148_free_irq;
2487        tsi148_bridge->generate_irq = tsi148_generate_irq;
2488        tsi148_bridge->lm_set = tsi148_lm_set;
2489        tsi148_bridge->lm_get = tsi148_lm_get;
2490        tsi148_bridge->lm_attach = tsi148_lm_attach;
2491        tsi148_bridge->lm_detach = tsi148_lm_detach;
2492        tsi148_bridge->slot_get = tsi148_slot_get;
2493
2494        data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2495        dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2496                (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2497        dev_info(&pdev->dev, "VME geographical address is %d\n",
2498                data & TSI148_LCSR_VSTAT_GA_M);
2499        dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2500                err_chk ? "enabled" : "disabled");
2501
2502        retval = tsi148_crcsr_init(pdev);
2503        if (retval) {
2504                dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2505                goto err_crcsr;
2506        }
2507
2508        /* Need to save tsi148_bridge pointer locally in link list for use in
2509         * tsi148_remove()
2510         */
2511        retval = vme_register_bridge(tsi148_bridge);
2512        if (retval != 0) {
2513                dev_err(&pdev->dev, "Chip Registration failed.\n");
2514                goto err_reg;
2515        }
2516
2517        /* Clear VME bus "board fail", and "power-up reset" lines */
2518        data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2519        data &= ~TSI148_LCSR_VSTAT_BRDFL;
2520        data |= TSI148_LCSR_VSTAT_CPURST;
2521        iowrite32be(data, tsi148_bridge->base + TSI148_LCSR_VSTAT);
2522
2523        return 0;
2524
2525        vme_unregister_bridge(tsi148_bridge);
2526err_reg:
2527        tsi148_crcsr_exit(pdev);
2528err_crcsr:
2529err_lm:
2530        /* resources are stored in link list */
2531        list_for_each(pos, &(tsi148_bridge->lm_resources)) {
2532                lm = list_entry(pos, struct vme_lm_resource, list);
2533                list_del(pos);
2534                kfree(lm);
2535        }
2536err_dma:
2537        /* resources are stored in link list */
2538        list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2539                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2540                list_del(pos);
2541                kfree(dma_ctrlr);
2542        }
2543err_slave:
2544        /* resources are stored in link list */
2545        list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2546                slave_image = list_entry(pos, struct vme_slave_resource, list);
2547                list_del(pos);
2548                kfree(slave_image);
2549        }
2550err_master:
2551        /* resources are stored in link list */
2552        list_for_each(pos, &(tsi148_bridge->master_resources)) {
2553                master_image = list_entry(pos, struct vme_master_resource, list);
2554                list_del(pos);
2555                kfree(master_image);
2556        }
2557
2558        tsi148_irq_exit(pdev);
2559err_irq:
2560err_test:
2561        iounmap(tsi148_bridge->base);
2562err_remap:
2563        pci_release_regions(pdev);
2564err_resource:
2565        pci_disable_device(pdev);
2566err_enable:
2567        kfree(tsi148_bridge);
2568err_struct:
2569        return retval;
2570
2571}
2572
2573static void tsi148_remove(struct pci_dev *pdev)
2574{
2575        struct list_head *pos = NULL;
2576        struct vme_master_resource *master_image;
2577        struct vme_slave_resource *slave_image;
2578        struct vme_dma_resource *dma_ctrlr;
2579        int i;
2580
2581        dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2582
2583        /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2584
2585        /*
2586         *  Shutdown all inbound and outbound windows.
2587         */
2588        for (i = 0; i < 8; i++) {
2589                iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_IT[i] +
2590                        TSI148_LCSR_OFFSET_ITAT);
2591                iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_OT[i] +
2592                        TSI148_LCSR_OFFSET_OTAT);
2593        }
2594
2595        /*
2596         *  Shutdown Location monitor.
2597         */
2598        iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_LMAT);
2599
2600        /*
2601         *  Shutdown CRG map.
2602         */
2603        iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CSRAT);
2604
2605        /*
2606         *  Clear error status.
2607         */
2608        iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_EDPAT);
2609        iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_VEAT);
2610        iowrite32be(0x07000700, tsi148_bridge->base + TSI148_LCSR_PSTAT);
2611
2612        /*
2613         *  Remove VIRQ interrupt (if any)
2614         */
2615        if (ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR) & 0x800) {
2616                iowrite32be(0x8000, tsi148_bridge->base + TSI148_LCSR_VICR);
2617        }
2618
2619        /*
2620         *  Disable and clear all interrupts.
2621         */
2622        iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
2623        iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
2624        iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTEN);
2625
2626        /*
2627         *  Map all Interrupts to PCI INTA
2628         */
2629        iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM1);
2630        iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM2);
2631
2632        tsi148_irq_exit(pdev);
2633
2634        vme_unregister_bridge(tsi148_bridge);
2635
2636        tsi148_crcsr_exit(pdev);
2637
2638        /* resources are stored in link list */
2639        list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2640                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2641                list_del(pos);
2642                kfree(dma_ctrlr);
2643        }
2644
2645        /* resources are stored in link list */
2646        list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2647                slave_image = list_entry(pos, struct vme_slave_resource, list);
2648                list_del(pos);
2649                kfree(slave_image);
2650        }
2651
2652        /* resources are stored in link list */
2653        list_for_each(pos, &(tsi148_bridge->master_resources)) {
2654                master_image = list_entry(pos, struct vme_master_resource, list);
2655                list_del(pos);
2656                kfree(master_image);
2657        }
2658
2661        iounmap(tsi148_bridge->base);
2662
2663        pci_release_regions(pdev);
2664
2665        pci_disable_device(pdev);
2666
2667        kfree(tsi148_bridge);
2668}
2669
2670static void __exit tsi148_exit(void)
2671{
2672        pci_unregister_driver(&tsi148_driver);
2673
2674        printk(KERN_DEBUG "Driver removed.\n");
2675}
2676
2677MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2678module_param(err_chk, bool, 0);
2679
2680MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2681MODULE_LICENSE("GPL");
2682
2683module_init(tsi148_init);
2684module_exit(tsi148_exit);
2685
2686/*----------------------------------------------------------------------------
2687 * STAGING
2688 *--------------------------------------------------------------------------*/
2689
2690#if 0
2691/*
2692 * Direct Mode DMA transfer
2693 *
2694 * XXX Not looking at direct mode for now, we can always use link list mode
2695 *     with a single entry.
2696 */
2697int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
2698        struct vme_dma_attr dest, size_t count)
2699{
2700        u32 dctlreg = 0;
2701        unsigned int tmp;
2702        int val;
2703        int channel, x;
2704        struct vmeDmaPacket *cur_dma;
2705        struct tsi148_dma_descriptor *dmaLL;
2706
2707        /* direct mode */
2708        dctlreg = 0x800000;
2709
2710        for (x = 0; x < 8; x++) {       /* vme block size */
2711                if ((32 << x) >= vmeDma->maxVmeBlockSize) {
2712                        break;
2713                }
2714        }
2715        if (x == 8)
2716                x = 7;
2717        dctlreg |= (x << 12);
2718
2719        for (x = 0; x < 8; x++) {       /* pci block size */
2720                if ((32 << x) >= vmeDma->maxPciBlockSize) {
2721                        break;
2722                }
2723        }
2724        if (x == 8)
2725                x = 7;
2726        dctlreg |= (x << 4);
2727
2728        if (vmeDma->vmeBackOffTimer) {
2729                for (x = 1; x < 8; x++) {       /* vme timer */
2730                        if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
2731                                break;
2732                        }
2733                }
2734                if (x == 8)
2735                        x = 7;
2736                dctlreg |= (x << 8);
2737        }
2738
2739        if (vmeDma->pciBackOffTimer) {
2740                for (x = 1; x < 8; x++) {       /* pci timer */
2741                        if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
2742                                break;
2743                        }
2744                }
2745                if (x == 8)
2746                        x = 7;
2747                dctlreg |= (x << 0);
2748        }
2749
2750        /* Program registers for DMA transfer */
2751        iowrite32be(dmaLL->dsau, tsi148_bridge->base +
2752                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
2753        iowrite32be(dmaLL->dsal, tsi148_bridge->base +
2754                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
2755        iowrite32be(dmaLL->ddau, tsi148_bridge->base +
2756                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
2757        iowrite32be(dmaLL->ddal, tsi148_bridge->base +
2758                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
2759        iowrite32be(dmaLL->dsat, tsi148_bridge->base +
2760                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
2761        iowrite32be(dmaLL->ddat, tsi148_bridge->base +
2762                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
2763        iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
2764                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
2765        iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
2766                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);
2767
2768        /* Start the operation */
2769        iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
2770                TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
2771
2772        tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
2773                TSI148_LCSR_OFFSET_DSTA);
2774        wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);
2775
2776        /*
2777         * Read status register, we should probably do this in some error
2778         * handler rather than here so that we can be sure we haven't kicked off
2779         * another DMA transfer.
2780         */
2781        val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
2782                TSI148_LCSR_OFFSET_DSTA);
2783
2784        vmeDma->vmeDmaStatus = 0;
2785        if (val & 0x10000000) {
2786                printk(KERN_ERR
2787                        "DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
2788                        val);
2789                vmeDma->vmeDmaStatus = val;
2790
2791        }
2792        return (0);
2793}
2794#endif
2795
2796#if 0
2797
2798/* Global VME controller information */
2799struct pci_dev *vme_pci_dev;
2800
2801/*
2802 * Set the VME bus arbiter with the requested attributes
2803 */
2804int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
2805{
2806        int temp_ctl = 0;
2807        int gto = 0;
2808
2809        temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
2810        temp_ctl &= 0xFFEFFF00;
2811
2812        if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
2813                gto = 8;
2814        } else if (vmeArb->globalTimeoutTimer > 2048) {
2815                return (-EINVAL);
2816        } else if (vmeArb->globalTimeoutTimer == 0) {
2817                gto = 0;
2818        } else {
2819                gto = 1;
2820                while ((16 * (1 << (gto - 1))) < vmeArb->globalTimeoutTimer) {
2821                        gto += 1;
2822                }
2823        }
2824        temp_ctl |= gto;
2825
2826        if (vmeArb->arbiterMode != VME_PRIORITY_MODE) {
2827                temp_ctl |= 1 << 6;
2828        }
2829
2830        if (vmeArb->arbiterTimeoutFlag) {
2831                temp_ctl |= 1 << 7;
2832        }
2833
2834        if (vmeArb->noEarlyReleaseFlag) {
2835                temp_ctl |= 1 << 20;
2836        }
2837        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);
2838
2839        return (0);
2840}
2841
2842/*
2843 * Return the attributes of the VME bus arbiter.
2844 */
2845int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
2846{
2847        int temp_ctl = 0;
2848        int gto = 0;
2849
2850
2851        temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
2852
2853        gto = temp_ctl & 0xF;
2854        if (gto != 0) {
2855                vmeArb->globalTimeoutTimer = (16 * (1 << (gto - 1)));
2856        }
2857
2858        if (temp_ctl & (1 << 6)) {
2859                vmeArb->arbiterMode = VME_R_ROBIN_MODE;
2860        } else {
2861                vmeArb->arbiterMode = VME_PRIORITY_MODE;
2862        }
2863
2864        if (temp_ctl & (1 << 7)) {
2865                vmeArb->arbiterTimeoutFlag = 1;
2866        }
2867
2868        if (temp_ctl & (1 << 20)) {
2869                vmeArb->noEarlyReleaseFlag = 1;
2870        }
2871
2872        return (0);
2873}
2874
2875/*
2876 * Set the VME bus requestor with the requested attributes
2877 */
2878int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
2879{
2880        int temp_ctl = 0;
2881
2882        temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2883        temp_ctl &= 0xFFFF0000;
2884
2885        if (vmeReq->releaseMode == 1) {
2886                temp_ctl |= (1 << 3);
2887        }
2888
2889        if (vmeReq->fairMode == 1) {
2890                temp_ctl |= (1 << 2);
2891        }
2892
2893        temp_ctl |= (vmeReq->timeonTimeoutTimer & 7) << 8;
2894        temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
2895        temp_ctl |= vmeReq->requestLevel;
2896
2897        iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2898        return (0);
2899}
2900
2901/*
2902 * Return the attributes of the VME bus requestor
2903 */
2904int tempe_get_requestor(vmeRequesterCfg_t * vmeReq)
2905{
2906        int temp_ctl = 0;
2907
2908        temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2909
2910        if (temp_ctl & 0x18) {
2911                vmeReq->releaseMode = 1;
2912        }
2913
2914        if (temp_ctl & (1 << 2)) {
2915                vmeReq->fairMode = 1;
2916        }
2917
2918        vmeReq->requestLevel = temp_ctl & 3;
2919        vmeReq->timeonTimeoutTimer = (temp_ctl >> 8) & 7;
2920        vmeReq->timeoffTimeoutTimer = (temp_ctl >> 12) & 7;
2921
2922        return (0);
2923}
2924
2925
2926#endif
2927