/* linux/drivers/vme/vme.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * VME Bridge Framework
   4 *
   5 * Author: Martyn Welch <martyn.welch@ge.com>
   6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   7 *
   8 * Based on work by Tom Armistead and Ajit Prem
   9 * Copyright 2004 Motorola Inc.
  10 */
  11
  12#include <linux/init.h>
  13#include <linux/export.h>
  14#include <linux/mm.h>
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/errno.h>
  18#include <linux/pci.h>
  19#include <linux/poll.h>
  20#include <linux/highmem.h>
  21#include <linux/interrupt.h>
  22#include <linux/pagemap.h>
  23#include <linux/device.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/syscalls.h>
  26#include <linux/mutex.h>
  27#include <linux/spinlock.h>
  28#include <linux/slab.h>
  29#include <linux/vme.h>
  30
  31#include "vme_bridge.h"
  32
  33/* Bitmask and list of registered buses both protected by common mutex */
  34static unsigned int vme_bus_numbers;
  35static LIST_HEAD(vme_bus_list);
  36static DEFINE_MUTEX(vme_buses_lock);
  37
  38static int __init vme_init(void);
  39
  40static struct vme_dev *dev_to_vme_dev(struct device *dev)
  41{
  42        return container_of(dev, struct vme_dev, dev);
  43}
  44
  45/*
  46 * Find the bridge that the resource is associated with.
  47 */
  48static struct vme_bridge *find_bridge(struct vme_resource *resource)
  49{
  50        /* Get list to search */
  51        switch (resource->type) {
  52        case VME_MASTER:
  53                return list_entry(resource->entry, struct vme_master_resource,
  54                        list)->parent;
  55                break;
  56        case VME_SLAVE:
  57                return list_entry(resource->entry, struct vme_slave_resource,
  58                        list)->parent;
  59                break;
  60        case VME_DMA:
  61                return list_entry(resource->entry, struct vme_dma_resource,
  62                        list)->parent;
  63                break;
  64        case VME_LM:
  65                return list_entry(resource->entry, struct vme_lm_resource,
  66                        list)->parent;
  67                break;
  68        default:
  69                printk(KERN_ERR "Unknown resource type\n");
  70                return NULL;
  71                break;
  72        }
  73}
  74
  75/**
  76 * vme_free_consistent - Allocate contiguous memory.
  77 * @resource: Pointer to VME resource.
  78 * @size: Size of allocation required.
  79 * @dma: Pointer to variable to store physical address of allocation.
  80 *
  81 * Allocate a contiguous block of memory for use by the driver. This is used to
  82 * create the buffers for the slave windows.
  83 *
  84 * Return: Virtual address of allocation on success, NULL on failure.
  85 */
  86void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  87        dma_addr_t *dma)
  88{
  89        struct vme_bridge *bridge;
  90
  91        if (!resource) {
  92                printk(KERN_ERR "No resource\n");
  93                return NULL;
  94        }
  95
  96        bridge = find_bridge(resource);
  97        if (!bridge) {
  98                printk(KERN_ERR "Can't find bridge\n");
  99                return NULL;
 100        }
 101
 102        if (!bridge->parent) {
 103                printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 104                return NULL;
 105        }
 106
 107        if (!bridge->alloc_consistent) {
 108                printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
 109                       bridge->name);
 110                return NULL;
 111        }
 112
 113        return bridge->alloc_consistent(bridge->parent, size, dma);
 114}
 115EXPORT_SYMBOL(vme_alloc_consistent);
 116
 117/**
 118 * vme_free_consistent - Free previously allocated memory.
 119 * @resource: Pointer to VME resource.
 120 * @size: Size of allocation to free.
 121 * @vaddr: Virtual address of allocation.
 122 * @dma: Physical address of allocation.
 123 *
 124 * Free previously allocated block of contiguous memory.
 125 */
 126void vme_free_consistent(struct vme_resource *resource, size_t size,
 127        void *vaddr, dma_addr_t dma)
 128{
 129        struct vme_bridge *bridge;
 130
 131        if (!resource) {
 132                printk(KERN_ERR "No resource\n");
 133                return;
 134        }
 135
 136        bridge = find_bridge(resource);
 137        if (!bridge) {
 138                printk(KERN_ERR "Can't find bridge\n");
 139                return;
 140        }
 141
 142        if (!bridge->parent) {
 143                printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 144                return;
 145        }
 146
 147        if (!bridge->free_consistent) {
 148                printk(KERN_ERR "free_consistent not supported by bridge %s\n",
 149                       bridge->name);
 150                return;
 151        }
 152
 153        bridge->free_consistent(bridge->parent, size, vaddr, dma);
 154}
 155EXPORT_SYMBOL(vme_free_consistent);
 156
 157/**
 158 * vme_get_size - Helper function returning size of a VME window
 159 * @resource: Pointer to VME slave or master resource.
 160 *
 161 * Determine the size of the VME window provided. This is a helper
 162 * function, wrappering the call to vme_master_get or vme_slave_get
 163 * depending on the type of window resource handed to it.
 164 *
 165 * Return: Size of the window on success, zero on failure.
 166 */
 167size_t vme_get_size(struct vme_resource *resource)
 168{
 169        int enabled, retval;
 170        unsigned long long base, size;
 171        dma_addr_t buf_base;
 172        u32 aspace, cycle, dwidth;
 173
 174        switch (resource->type) {
 175        case VME_MASTER:
 176                retval = vme_master_get(resource, &enabled, &base, &size,
 177                        &aspace, &cycle, &dwidth);
 178                if (retval)
 179                        return 0;
 180
 181                return size;
 182                break;
 183        case VME_SLAVE:
 184                retval = vme_slave_get(resource, &enabled, &base, &size,
 185                        &buf_base, &aspace, &cycle);
 186                if (retval)
 187                        return 0;
 188
 189                return size;
 190                break;
 191        case VME_DMA:
 192                return 0;
 193                break;
 194        default:
 195                printk(KERN_ERR "Unknown resource type\n");
 196                return 0;
 197                break;
 198        }
 199}
 200EXPORT_SYMBOL(vme_get_size);
 201
 202int vme_check_window(u32 aspace, unsigned long long vme_base,
 203                     unsigned long long size)
 204{
 205        int retval = 0;
 206
 207        if (vme_base + size < size)
 208                return -EINVAL;
 209
 210        switch (aspace) {
 211        case VME_A16:
 212                if (vme_base + size > VME_A16_MAX)
 213                        retval = -EFAULT;
 214                break;
 215        case VME_A24:
 216                if (vme_base + size > VME_A24_MAX)
 217                        retval = -EFAULT;
 218                break;
 219        case VME_A32:
 220                if (vme_base + size > VME_A32_MAX)
 221                        retval = -EFAULT;
 222                break;
 223        case VME_A64:
 224                /* The VME_A64_MAX limit is actually U64_MAX + 1 */
 225                break;
 226        case VME_CRCSR:
 227                if (vme_base + size > VME_CRCSR_MAX)
 228                        retval = -EFAULT;
 229                break;
 230        case VME_USER1:
 231        case VME_USER2:
 232        case VME_USER3:
 233        case VME_USER4:
 234                /* User Defined */
 235                break;
 236        default:
 237                printk(KERN_ERR "Invalid address space\n");
 238                retval = -EINVAL;
 239                break;
 240        }
 241
 242        return retval;
 243}
 244EXPORT_SYMBOL(vme_check_window);
 245
 246static u32 vme_get_aspace(int am)
 247{
 248        switch (am) {
 249        case 0x29:
 250        case 0x2D:
 251                return VME_A16;
 252        case 0x38:
 253        case 0x39:
 254        case 0x3A:
 255        case 0x3B:
 256        case 0x3C:
 257        case 0x3D:
 258        case 0x3E:
 259        case 0x3F:
 260                return VME_A24;
 261        case 0x8:
 262        case 0x9:
 263        case 0xA:
 264        case 0xB:
 265        case 0xC:
 266        case 0xD:
 267        case 0xE:
 268        case 0xF:
 269                return VME_A32;
 270        case 0x0:
 271        case 0x1:
 272        case 0x3:
 273                return VME_A64;
 274        }
 275
 276        return 0;
 277}
 278
 279/**
 280 * vme_slave_request - Request a VME slave window resource.
 281 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 282 * @address: Required VME address space.
 283 * @cycle: Required VME data transfer cycle type.
 284 *
 285 * Request use of a VME window resource capable of being set for the requested
 286 * address space and data transfer cycle.
 287 *
 288 * Return: Pointer to VME resource on success, NULL on failure.
 289 */
 290struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
 291        u32 cycle)
 292{
 293        struct vme_bridge *bridge;
 294        struct list_head *slave_pos = NULL;
 295        struct vme_slave_resource *allocated_image = NULL;
 296        struct vme_slave_resource *slave_image = NULL;
 297        struct vme_resource *resource = NULL;
 298
 299        bridge = vdev->bridge;
 300        if (!bridge) {
 301                printk(KERN_ERR "Can't find VME bus\n");
 302                goto err_bus;
 303        }
 304
 305        /* Loop through slave resources */
 306        list_for_each(slave_pos, &bridge->slave_resources) {
 307                slave_image = list_entry(slave_pos,
 308                        struct vme_slave_resource, list);
 309
 310                if (!slave_image) {
 311                        printk(KERN_ERR "Registered NULL Slave resource\n");
 312                        continue;
 313                }
 314
 315                /* Find an unlocked and compatible image */
 316                mutex_lock(&slave_image->mtx);
 317                if (((slave_image->address_attr & address) == address) &&
 318                        ((slave_image->cycle_attr & cycle) == cycle) &&
 319                        (slave_image->locked == 0)) {
 320
 321                        slave_image->locked = 1;
 322                        mutex_unlock(&slave_image->mtx);
 323                        allocated_image = slave_image;
 324                        break;
 325                }
 326                mutex_unlock(&slave_image->mtx);
 327        }
 328
 329        /* No free image */
 330        if (!allocated_image)
 331                goto err_image;
 332
 333        resource = kmalloc(sizeof(*resource), GFP_KERNEL);
 334        if (!resource)
 335                goto err_alloc;
 336
 337        resource->type = VME_SLAVE;
 338        resource->entry = &allocated_image->list;
 339
 340        return resource;
 341
 342err_alloc:
 343        /* Unlock image */
 344        mutex_lock(&slave_image->mtx);
 345        slave_image->locked = 0;
 346        mutex_unlock(&slave_image->mtx);
 347err_image:
 348err_bus:
 349        return NULL;
 350}
 351EXPORT_SYMBOL(vme_slave_request);
 352
 353/**
 354 * vme_slave_set - Set VME slave window configuration.
 355 * @resource: Pointer to VME slave resource.
 356 * @enabled: State to which the window should be configured.
 357 * @vme_base: Base address for the window.
 358 * @size: Size of the VME window.
 359 * @buf_base: Based address of buffer used to provide VME slave window storage.
 360 * @aspace: VME address space for the VME window.
 361 * @cycle: VME data transfer cycle type for the VME window.
 362 *
 363 * Set configuration for provided VME slave window.
 364 *
 365 * Return: Zero on success, -EINVAL if operation is not supported on this
 366 *         device, if an invalid resource has been provided or invalid
 367 *         attributes are provided. Hardware specific errors may also be
 368 *         returned.
 369 */
 370int vme_slave_set(struct vme_resource *resource, int enabled,
 371        unsigned long long vme_base, unsigned long long size,
 372        dma_addr_t buf_base, u32 aspace, u32 cycle)
 373{
 374        struct vme_bridge *bridge = find_bridge(resource);
 375        struct vme_slave_resource *image;
 376        int retval;
 377
 378        if (resource->type != VME_SLAVE) {
 379                printk(KERN_ERR "Not a slave resource\n");
 380                return -EINVAL;
 381        }
 382
 383        image = list_entry(resource->entry, struct vme_slave_resource, list);
 384
 385        if (!bridge->slave_set) {
 386                printk(KERN_ERR "Function not supported\n");
 387                return -ENOSYS;
 388        }
 389
 390        if (!(((image->address_attr & aspace) == aspace) &&
 391                ((image->cycle_attr & cycle) == cycle))) {
 392                printk(KERN_ERR "Invalid attributes\n");
 393                return -EINVAL;
 394        }
 395
 396        retval = vme_check_window(aspace, vme_base, size);
 397        if (retval)
 398                return retval;
 399
 400        return bridge->slave_set(image, enabled, vme_base, size, buf_base,
 401                aspace, cycle);
 402}
 403EXPORT_SYMBOL(vme_slave_set);
 404
 405/**
 406 * vme_slave_get - Retrieve VME slave window configuration.
 407 * @resource: Pointer to VME slave resource.
 408 * @enabled: Pointer to variable for storing state.
 409 * @vme_base: Pointer to variable for storing window base address.
 410 * @size: Pointer to variable for storing window size.
 411 * @buf_base: Pointer to variable for storing slave buffer base address.
 412 * @aspace: Pointer to variable for storing VME address space.
 413 * @cycle: Pointer to variable for storing VME data transfer cycle type.
 414 *
 415 * Return configuration for provided VME slave window.
 416 *
 417 * Return: Zero on success, -EINVAL if operation is not supported on this
 418 *         device or if an invalid resource has been provided.
 419 */
 420int vme_slave_get(struct vme_resource *resource, int *enabled,
 421        unsigned long long *vme_base, unsigned long long *size,
 422        dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
 423{
 424        struct vme_bridge *bridge = find_bridge(resource);
 425        struct vme_slave_resource *image;
 426
 427        if (resource->type != VME_SLAVE) {
 428                printk(KERN_ERR "Not a slave resource\n");
 429                return -EINVAL;
 430        }
 431
 432        image = list_entry(resource->entry, struct vme_slave_resource, list);
 433
 434        if (!bridge->slave_get) {
 435                printk(KERN_ERR "vme_slave_get not supported\n");
 436                return -EINVAL;
 437        }
 438
 439        return bridge->slave_get(image, enabled, vme_base, size, buf_base,
 440                aspace, cycle);
 441}
 442EXPORT_SYMBOL(vme_slave_get);
 443
 444/**
 445 * vme_slave_free - Free VME slave window
 446 * @resource: Pointer to VME slave resource.
 447 *
 448 * Free the provided slave resource so that it may be reallocated.
 449 */
 450void vme_slave_free(struct vme_resource *resource)
 451{
 452        struct vme_slave_resource *slave_image;
 453
 454        if (resource->type != VME_SLAVE) {
 455                printk(KERN_ERR "Not a slave resource\n");
 456                return;
 457        }
 458
 459        slave_image = list_entry(resource->entry, struct vme_slave_resource,
 460                list);
 461        if (!slave_image) {
 462                printk(KERN_ERR "Can't find slave resource\n");
 463                return;
 464        }
 465
 466        /* Unlock image */
 467        mutex_lock(&slave_image->mtx);
 468        if (slave_image->locked == 0)
 469                printk(KERN_ERR "Image is already free\n");
 470
 471        slave_image->locked = 0;
 472        mutex_unlock(&slave_image->mtx);
 473
 474        /* Free up resource memory */
 475        kfree(resource);
 476}
 477EXPORT_SYMBOL(vme_slave_free);
 478
 479/**
 480 * vme_master_request - Request a VME master window resource.
 481 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 482 * @address: Required VME address space.
 483 * @cycle: Required VME data transfer cycle type.
 484 * @dwidth: Required VME data transfer width.
 485 *
 486 * Request use of a VME window resource capable of being set for the requested
 487 * address space, data transfer cycle and width.
 488 *
 489 * Return: Pointer to VME resource on success, NULL on failure.
 490 */
 491struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
 492        u32 cycle, u32 dwidth)
 493{
 494        struct vme_bridge *bridge;
 495        struct list_head *master_pos = NULL;
 496        struct vme_master_resource *allocated_image = NULL;
 497        struct vme_master_resource *master_image = NULL;
 498        struct vme_resource *resource = NULL;
 499
 500        bridge = vdev->bridge;
 501        if (!bridge) {
 502                printk(KERN_ERR "Can't find VME bus\n");
 503                goto err_bus;
 504        }
 505
 506        /* Loop through master resources */
 507        list_for_each(master_pos, &bridge->master_resources) {
 508                master_image = list_entry(master_pos,
 509                        struct vme_master_resource, list);
 510
 511                if (!master_image) {
 512                        printk(KERN_WARNING "Registered NULL master resource\n");
 513                        continue;
 514                }
 515
 516                /* Find an unlocked and compatible image */
 517                spin_lock(&master_image->lock);
 518                if (((master_image->address_attr & address) == address) &&
 519                        ((master_image->cycle_attr & cycle) == cycle) &&
 520                        ((master_image->width_attr & dwidth) == dwidth) &&
 521                        (master_image->locked == 0)) {
 522
 523                        master_image->locked = 1;
 524                        spin_unlock(&master_image->lock);
 525                        allocated_image = master_image;
 526                        break;
 527                }
 528                spin_unlock(&master_image->lock);
 529        }
 530
 531        /* Check to see if we found a resource */
 532        if (!allocated_image) {
 533                printk(KERN_ERR "Can't find a suitable resource\n");
 534                goto err_image;
 535        }
 536
 537        resource = kmalloc(sizeof(*resource), GFP_KERNEL);
 538        if (!resource)
 539                goto err_alloc;
 540
 541        resource->type = VME_MASTER;
 542        resource->entry = &allocated_image->list;
 543
 544        return resource;
 545
 546err_alloc:
 547        /* Unlock image */
 548        spin_lock(&master_image->lock);
 549        master_image->locked = 0;
 550        spin_unlock(&master_image->lock);
 551err_image:
 552err_bus:
 553        return NULL;
 554}
 555EXPORT_SYMBOL(vme_master_request);
 556
 557/**
 558 * vme_master_set - Set VME master window configuration.
 559 * @resource: Pointer to VME master resource.
 560 * @enabled: State to which the window should be configured.
 561 * @vme_base: Base address for the window.
 562 * @size: Size of the VME window.
 563 * @aspace: VME address space for the VME window.
 564 * @cycle: VME data transfer cycle type for the VME window.
 565 * @dwidth: VME data transfer width for the VME window.
 566 *
 567 * Set configuration for provided VME master window.
 568 *
 569 * Return: Zero on success, -EINVAL if operation is not supported on this
 570 *         device, if an invalid resource has been provided or invalid
 571 *         attributes are provided. Hardware specific errors may also be
 572 *         returned.
 573 */
 574int vme_master_set(struct vme_resource *resource, int enabled,
 575        unsigned long long vme_base, unsigned long long size, u32 aspace,
 576        u32 cycle, u32 dwidth)
 577{
 578        struct vme_bridge *bridge = find_bridge(resource);
 579        struct vme_master_resource *image;
 580        int retval;
 581
 582        if (resource->type != VME_MASTER) {
 583                printk(KERN_ERR "Not a master resource\n");
 584                return -EINVAL;
 585        }
 586
 587        image = list_entry(resource->entry, struct vme_master_resource, list);
 588
 589        if (!bridge->master_set) {
 590                printk(KERN_WARNING "vme_master_set not supported\n");
 591                return -EINVAL;
 592        }
 593
 594        if (!(((image->address_attr & aspace) == aspace) &&
 595                ((image->cycle_attr & cycle) == cycle) &&
 596                ((image->width_attr & dwidth) == dwidth))) {
 597                printk(KERN_WARNING "Invalid attributes\n");
 598                return -EINVAL;
 599        }
 600
 601        retval = vme_check_window(aspace, vme_base, size);
 602        if (retval)
 603                return retval;
 604
 605        return bridge->master_set(image, enabled, vme_base, size, aspace,
 606                cycle, dwidth);
 607}
 608EXPORT_SYMBOL(vme_master_set);
 609
 610/**
 611 * vme_master_get - Retrieve VME master window configuration.
 612 * @resource: Pointer to VME master resource.
 613 * @enabled: Pointer to variable for storing state.
 614 * @vme_base: Pointer to variable for storing window base address.
 615 * @size: Pointer to variable for storing window size.
 616 * @aspace: Pointer to variable for storing VME address space.
 617 * @cycle: Pointer to variable for storing VME data transfer cycle type.
 618 * @dwidth: Pointer to variable for storing VME data transfer width.
 619 *
 620 * Return configuration for provided VME master window.
 621 *
 622 * Return: Zero on success, -EINVAL if operation is not supported on this
 623 *         device or if an invalid resource has been provided.
 624 */
 625int vme_master_get(struct vme_resource *resource, int *enabled,
 626        unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
 627        u32 *cycle, u32 *dwidth)
 628{
 629        struct vme_bridge *bridge = find_bridge(resource);
 630        struct vme_master_resource *image;
 631
 632        if (resource->type != VME_MASTER) {
 633                printk(KERN_ERR "Not a master resource\n");
 634                return -EINVAL;
 635        }
 636
 637        image = list_entry(resource->entry, struct vme_master_resource, list);
 638
 639        if (!bridge->master_get) {
 640                printk(KERN_WARNING "%s not supported\n", __func__);
 641                return -EINVAL;
 642        }
 643
 644        return bridge->master_get(image, enabled, vme_base, size, aspace,
 645                cycle, dwidth);
 646}
 647EXPORT_SYMBOL(vme_master_get);
 648
 649/**
 650 * vme_master_write - Read data from VME space into a buffer.
 651 * @resource: Pointer to VME master resource.
 652 * @buf: Pointer to buffer where data should be transferred.
 653 * @count: Number of bytes to transfer.
 654 * @offset: Offset into VME master window at which to start transfer.
 655 *
 656 * Perform read of count bytes of data from location on VME bus which maps into
 657 * the VME master window at offset to buf.
 658 *
 659 * Return: Number of bytes read, -EINVAL if resource is not a VME master
 660 *         resource or read operation is not supported. -EFAULT returned if
 661 *         invalid offset is provided. Hardware specific errors may also be
 662 *         returned.
 663 */
 664ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
 665        loff_t offset)
 666{
 667        struct vme_bridge *bridge = find_bridge(resource);
 668        struct vme_master_resource *image;
 669        size_t length;
 670
 671        if (!bridge->master_read) {
 672                printk(KERN_WARNING "Reading from resource not supported\n");
 673                return -EINVAL;
 674        }
 675
 676        if (resource->type != VME_MASTER) {
 677                printk(KERN_ERR "Not a master resource\n");
 678                return -EINVAL;
 679        }
 680
 681        image = list_entry(resource->entry, struct vme_master_resource, list);
 682
 683        length = vme_get_size(resource);
 684
 685        if (offset > length) {
 686                printk(KERN_WARNING "Invalid Offset\n");
 687                return -EFAULT;
 688        }
 689
 690        if ((offset + count) > length)
 691                count = length - offset;
 692
 693        return bridge->master_read(image, buf, count, offset);
 694
 695}
 696EXPORT_SYMBOL(vme_master_read);
 697
 698/**
 699 * vme_master_write - Write data out to VME space from a buffer.
 700 * @resource: Pointer to VME master resource.
 701 * @buf: Pointer to buffer holding data to transfer.
 702 * @count: Number of bytes to transfer.
 703 * @offset: Offset into VME master window at which to start transfer.
 704 *
 705 * Perform write of count bytes of data from buf to location on VME bus which
 706 * maps into the VME master window at offset.
 707 *
 708 * Return: Number of bytes written, -EINVAL if resource is not a VME master
 709 *         resource or write operation is not supported. -EFAULT returned if
 710 *         invalid offset is provided. Hardware specific errors may also be
 711 *         returned.
 712 */
 713ssize_t vme_master_write(struct vme_resource *resource, void *buf,
 714        size_t count, loff_t offset)
 715{
 716        struct vme_bridge *bridge = find_bridge(resource);
 717        struct vme_master_resource *image;
 718        size_t length;
 719
 720        if (!bridge->master_write) {
 721                printk(KERN_WARNING "Writing to resource not supported\n");
 722                return -EINVAL;
 723        }
 724
 725        if (resource->type != VME_MASTER) {
 726                printk(KERN_ERR "Not a master resource\n");
 727                return -EINVAL;
 728        }
 729
 730        image = list_entry(resource->entry, struct vme_master_resource, list);
 731
 732        length = vme_get_size(resource);
 733
 734        if (offset > length) {
 735                printk(KERN_WARNING "Invalid Offset\n");
 736                return -EFAULT;
 737        }
 738
 739        if ((offset + count) > length)
 740                count = length - offset;
 741
 742        return bridge->master_write(image, buf, count, offset);
 743}
 744EXPORT_SYMBOL(vme_master_write);
 745
 746/**
 747 * vme_master_rmw - Perform read-modify-write cycle.
 748 * @resource: Pointer to VME master resource.
 749 * @mask: Bits to be compared and swapped in operation.
 750 * @compare: Bits to be compared with data read from offset.
 751 * @swap: Bits to be swapped in data read from offset.
 752 * @offset: Offset into VME master window at which to perform operation.
 753 *
 754 * Perform read-modify-write cycle on provided location:
 755 * - Location on VME bus is read.
 756 * - Bits selected by mask are compared with compare.
 757 * - Where a selected bit matches that in compare and are selected in swap,
 758 * the bit is swapped.
 759 * - Result written back to location on VME bus.
 760 *
 761 * Return: Bytes written on success, -EINVAL if resource is not a VME master
 762 *         resource or RMW operation is not supported. Hardware specific
 763 *         errors may also be returned.
 764 */
 765unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
 766        unsigned int compare, unsigned int swap, loff_t offset)
 767{
 768        struct vme_bridge *bridge = find_bridge(resource);
 769        struct vme_master_resource *image;
 770
 771        if (!bridge->master_rmw) {
 772                printk(KERN_WARNING "Writing to resource not supported\n");
 773                return -EINVAL;
 774        }
 775
 776        if (resource->type != VME_MASTER) {
 777                printk(KERN_ERR "Not a master resource\n");
 778                return -EINVAL;
 779        }
 780
 781        image = list_entry(resource->entry, struct vme_master_resource, list);
 782
 783        return bridge->master_rmw(image, mask, compare, swap, offset);
 784}
 785EXPORT_SYMBOL(vme_master_rmw);
 786
 787/**
 788 * vme_master_mmap - Mmap region of VME master window.
 789 * @resource: Pointer to VME master resource.
 790 * @vma: Pointer to definition of user mapping.
 791 *
 792 * Memory map a region of the VME master window into user space.
 793 *
 794 * Return: Zero on success, -EINVAL if resource is not a VME master
 795 *         resource or -EFAULT if map exceeds window size. Other generic mmap
 796 *         errors may also be returned.
 797 */
 798int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
 799{
 800        struct vme_master_resource *image;
 801        phys_addr_t phys_addr;
 802        unsigned long vma_size;
 803
 804        if (resource->type != VME_MASTER) {
 805                pr_err("Not a master resource\n");
 806                return -EINVAL;
 807        }
 808
 809        image = list_entry(resource->entry, struct vme_master_resource, list);
 810        phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
 811        vma_size = vma->vm_end - vma->vm_start;
 812
 813        if (phys_addr + vma_size > image->bus_resource.end + 1) {
 814                pr_err("Map size cannot exceed the window size\n");
 815                return -EFAULT;
 816        }
 817
 818        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 819
 820        return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
 821}
 822EXPORT_SYMBOL(vme_master_mmap);
 823
 824/**
 825 * vme_master_free - Free VME master window
 826 * @resource: Pointer to VME master resource.
 827 *
 828 * Free the provided master resource so that it may be reallocated.
 829 */
 830void vme_master_free(struct vme_resource *resource)
 831{
 832        struct vme_master_resource *master_image;
 833
 834        if (resource->type != VME_MASTER) {
 835                printk(KERN_ERR "Not a master resource\n");
 836                return;
 837        }
 838
 839        master_image = list_entry(resource->entry, struct vme_master_resource,
 840                list);
 841        if (!master_image) {
 842                printk(KERN_ERR "Can't find master resource\n");
 843                return;
 844        }
 845
 846        /* Unlock image */
 847        spin_lock(&master_image->lock);
 848        if (master_image->locked == 0)
 849                printk(KERN_ERR "Image is already free\n");
 850
 851        master_image->locked = 0;
 852        spin_unlock(&master_image->lock);
 853
 854        /* Free up resource memory */
 855        kfree(resource);
 856}
 857EXPORT_SYMBOL(vme_master_free);
 858
/**
 * vme_dma_request - Request a DMA controller.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @route: Required src/destination combination.
 *
 * Request a VME DMA controller with capability to perform transfers between
 * requested source/destination combination.
 *
 * Return: Pointer to VME DMA resource on success, NULL on failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
        struct vme_bridge *bridge;
        struct list_head *dma_pos = NULL;
        struct vme_dma_resource *allocated_ctrlr = NULL;
        struct vme_dma_resource *dma_ctrlr = NULL;
        struct vme_resource *resource = NULL;

        /* XXX Not checking resource attributes */
        printk(KERN_ERR "No VME resource Attribute tests done\n");

        bridge = vdev->bridge;
        if (!bridge) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through DMA resources */
        list_for_each(dma_pos, &bridge->dma_resources) {
                dma_ctrlr = list_entry(dma_pos,
                        struct vme_dma_resource, list);
                if (!dma_ctrlr) {
                        /*
                         * NOTE(review): list_entry() is container_of() and
                         * cannot yield NULL here; this check looks dead.
                         */
                        printk(KERN_ERR "Registered NULL DMA resource\n");
                        continue;
                }

                /* Find an unlocked and compatible controller */
                mutex_lock(&dma_ctrlr->mtx);
                if (((dma_ctrlr->route_attr & route) == route) &&
                        (dma_ctrlr->locked == 0)) {

                        /*
                         * Claim the controller before dropping its mutex so
                         * no concurrent requester can grab it.
                         */
                        dma_ctrlr->locked = 1;
                        mutex_unlock(&dma_ctrlr->mtx);
                        allocated_ctrlr = dma_ctrlr;
                        break;
                }
                mutex_unlock(&dma_ctrlr->mtx);
        }

        /* Check to see if we found a resource */
        if (!allocated_ctrlr)
                goto err_ctrlr;

        resource = kmalloc(sizeof(*resource), GFP_KERNEL);
        if (!resource)
                goto err_alloc;

        resource->type = VME_DMA;
        resource->entry = &allocated_ctrlr->list;

        return resource;

err_alloc:
        /* Unlock image */
        mutex_lock(&dma_ctrlr->mtx);
        dma_ctrlr->locked = 0;
        mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
 931
 932/**
 933 * vme_new_dma_list - Create new VME DMA list.
 934 * @resource: Pointer to VME DMA resource.
 935 *
 936 * Create a new VME DMA list. It is the responsibility of the user to free
 937 * the list once it is no longer required with vme_dma_list_free().
 938 *
 939 * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
 940 *         VME DMA resource.
 941 */
 942struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
 943{
 944        struct vme_dma_list *dma_list;
 945
 946        if (resource->type != VME_DMA) {
 947                printk(KERN_ERR "Not a DMA resource\n");
 948                return NULL;
 949        }
 950
 951        dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
 952        if (!dma_list)
 953                return NULL;
 954
 955        INIT_LIST_HEAD(&dma_list->entries);
 956        dma_list->parent = list_entry(resource->entry,
 957                                      struct vme_dma_resource,
 958                                      list);
 959        mutex_init(&dma_list->mtx);
 960
 961        return dma_list;
 962}
 963EXPORT_SYMBOL(vme_new_dma_list);
 964
 965/**
 966 * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
 967 * @pattern: Value to use used as pattern
 968 * @type: Type of pattern to be written.
 969 *
 970 * Create VME DMA list attribute for pattern generation. It is the
 971 * responsibility of the user to free used attributes using
 972 * vme_dma_free_attribute().
 973 *
 974 * Return: Pointer to VME DMA attribute, NULL on failure.
 975 */
 976struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
 977{
 978        struct vme_dma_attr *attributes;
 979        struct vme_dma_pattern *pattern_attr;
 980
 981        attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
 982        if (!attributes)
 983                goto err_attr;
 984
 985        pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
 986        if (!pattern_attr)
 987                goto err_pat;
 988
 989        attributes->type = VME_DMA_PATTERN;
 990        attributes->private = (void *)pattern_attr;
 991
 992        pattern_attr->pattern = pattern;
 993        pattern_attr->type = type;
 994
 995        return attributes;
 996
 997err_pat:
 998        kfree(attributes);
 999err_attr:
1000        return NULL;
1001}
1002EXPORT_SYMBOL(vme_dma_pattern_attribute);
1003
1004/**
1005 * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
1006 * @address: PCI base address for DMA transfer.
1007 *
1008 * Create VME DMA list attribute pointing to a location on PCI for DMA
1009 * transfers. It is the responsibility of the user to free used attributes
1010 * using vme_dma_free_attribute().
1011 *
1012 * Return: Pointer to VME DMA attribute, NULL on failure.
1013 */
1014struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
1015{
1016        struct vme_dma_attr *attributes;
1017        struct vme_dma_pci *pci_attr;
1018
1019        /* XXX Run some sanity checks here */
1020
1021        attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
1022        if (!attributes)
1023                goto err_attr;
1024
1025        pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
1026        if (!pci_attr)
1027                goto err_pci;
1028
1029        attributes->type = VME_DMA_PCI;
1030        attributes->private = (void *)pci_attr;
1031
1032        pci_attr->address = address;
1033
1034        return attributes;
1035
1036err_pci:
1037        kfree(attributes);
1038err_attr:
1039        return NULL;
1040}
1041EXPORT_SYMBOL(vme_dma_pci_attribute);
1042
1043/**
1044 * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
1045 * @address: VME base address for DMA transfer.
1046 * @aspace: VME address space to use for DMA transfer.
1047 * @cycle: VME bus cycle to use for DMA transfer.
1048 * @dwidth: VME data width to use for DMA transfer.
1049 *
1050 * Create VME DMA list attribute pointing to a location on the VME bus for DMA
1051 * transfers. It is the responsibility of the user to free used attributes
1052 * using vme_dma_free_attribute().
1053 *
1054 * Return: Pointer to VME DMA attribute, NULL on failure.
1055 */
1056struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
1057        u32 aspace, u32 cycle, u32 dwidth)
1058{
1059        struct vme_dma_attr *attributes;
1060        struct vme_dma_vme *vme_attr;
1061
1062        attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
1063        if (!attributes)
1064                goto err_attr;
1065
1066        vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
1067        if (!vme_attr)
1068                goto err_vme;
1069
1070        attributes->type = VME_DMA_VME;
1071        attributes->private = (void *)vme_attr;
1072
1073        vme_attr->address = address;
1074        vme_attr->aspace = aspace;
1075        vme_attr->cycle = cycle;
1076        vme_attr->dwidth = dwidth;
1077
1078        return attributes;
1079
1080err_vme:
1081        kfree(attributes);
1082err_attr:
1083        return NULL;
1084}
1085EXPORT_SYMBOL(vme_dma_vme_attribute);
1086
1087/**
1088 * vme_dma_free_attribute - Free DMA list attribute.
1089 * @attributes: Pointer to DMA list attribute.
1090 *
1091 * Free VME DMA list attribute. VME DMA list attributes can be safely freed
1092 * once vme_dma_list_add() has returned.
1093 */
1094void vme_dma_free_attribute(struct vme_dma_attr *attributes)
1095{
1096        kfree(attributes->private);
1097        kfree(attributes);
1098}
1099EXPORT_SYMBOL(vme_dma_free_attribute);
1100
/**
 * vme_dma_list_add - Add entry to a VME DMA list.
 * @list: Pointer to VME list.
 * @src: Pointer to DMA list attribute to use as source.
 * @dest: Pointer to DMA list attribute to use as destination.
 * @count: Number of bytes to transfer.
 *
 * Add an entry to the provided VME DMA list. Entry requires pointers to source
 * and destination DMA attributes and a count.
 *
 * Please note, the attributes supported as source and destinations for
 * transfers are hardware dependent.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device or if the link list has already been submitted for execution.
 *         Hardware specific errors also possible.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
        struct vme_dma_attr *dest, size_t count)
{
        struct vme_bridge *bridge = list->parent->parent;
        int retval;

        if (!bridge->dma_list_add) {
                printk(KERN_WARNING "Link List DMA generation not supported\n");
                return -EINVAL;
        }

        /*
         * trylock rather than lock: a held mutex means the list has been
         * submitted for execution, so adding entries is rejected rather
         * than blocked on.
         */
        if (!mutex_trylock(&list->mtx)) {
                printk(KERN_ERR "Link List already submitted\n");
                return -EINVAL;
        }

        retval = bridge->dma_list_add(list, src, dest, count);

        mutex_unlock(&list->mtx);

        return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
1141
1142/**
1143 * vme_dma_list_exec - Queue a VME DMA list for execution.
1144 * @list: Pointer to VME list.
1145 *
1146 * Queue the provided VME DMA list for execution. The call will return once the
1147 * list has been executed.
1148 *
1149 * Return: Zero on success, -EINVAL if operation is not supported on this
1150 *         device. Hardware specific errors also possible.
1151 */
1152int vme_dma_list_exec(struct vme_dma_list *list)
1153{
1154        struct vme_bridge *bridge = list->parent->parent;
1155        int retval;
1156
1157        if (!bridge->dma_list_exec) {
1158                printk(KERN_ERR "Link List DMA execution not supported\n");
1159                return -EINVAL;
1160        }
1161
1162        mutex_lock(&list->mtx);
1163
1164        retval = bridge->dma_list_exec(list);
1165
1166        mutex_unlock(&list->mtx);
1167
1168        return retval;
1169}
1170EXPORT_SYMBOL(vme_dma_list_exec);
1171
1172/**
1173 * vme_dma_list_free - Free a VME DMA list.
1174 * @list: Pointer to VME list.
1175 *
1176 * Free the provided DMA list and all its entries.
1177 *
1178 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
1179 *         is still in use. Hardware specific errors also possible.
1180 */
1181int vme_dma_list_free(struct vme_dma_list *list)
1182{
1183        struct vme_bridge *bridge = list->parent->parent;
1184        int retval;
1185
1186        if (!bridge->dma_list_empty) {
1187                printk(KERN_WARNING "Emptying of Link Lists not supported\n");
1188                return -EINVAL;
1189        }
1190
1191        if (!mutex_trylock(&list->mtx)) {
1192                printk(KERN_ERR "Link List in use\n");
1193                return -EBUSY;
1194        }
1195
1196        /*
1197         * Empty out all of the entries from the DMA list. We need to go to the
1198         * low level driver as DMA entries are driver specific.
1199         */
1200        retval = bridge->dma_list_empty(list);
1201        if (retval) {
1202                printk(KERN_ERR "Unable to empty link-list entries\n");
1203                mutex_unlock(&list->mtx);
1204                return retval;
1205        }
1206        mutex_unlock(&list->mtx);
1207        kfree(list);
1208
1209        return retval;
1210}
1211EXPORT_SYMBOL(vme_dma_list_free);
1212
1213/**
1214 * vme_dma_free - Free a VME DMA resource.
1215 * @resource: Pointer to VME DMA resource.
1216 *
1217 * Free the provided DMA resource so that it may be reallocated.
1218 *
1219 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
1220 *         is still active.
1221 */
1222int vme_dma_free(struct vme_resource *resource)
1223{
1224        struct vme_dma_resource *ctrlr;
1225
1226        if (resource->type != VME_DMA) {
1227                printk(KERN_ERR "Not a DMA resource\n");
1228                return -EINVAL;
1229        }
1230
1231        ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
1232
1233        if (!mutex_trylock(&ctrlr->mtx)) {
1234                printk(KERN_ERR "Resource busy, can't free\n");
1235                return -EBUSY;
1236        }
1237
1238        if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
1239                printk(KERN_WARNING "Resource still processing transfers\n");
1240                mutex_unlock(&ctrlr->mtx);
1241                return -EBUSY;
1242        }
1243
1244        ctrlr->locked = 0;
1245
1246        mutex_unlock(&ctrlr->mtx);
1247
1248        kfree(resource);
1249
1250        return 0;
1251}
1252EXPORT_SYMBOL(vme_dma_free);
1253
1254void vme_bus_error_handler(struct vme_bridge *bridge,
1255                           unsigned long long address, int am)
1256{
1257        struct list_head *handler_pos = NULL;
1258        struct vme_error_handler *handler;
1259        int handler_triggered = 0;
1260        u32 aspace = vme_get_aspace(am);
1261
1262        list_for_each(handler_pos, &bridge->vme_error_handlers) {
1263                handler = list_entry(handler_pos, struct vme_error_handler,
1264                                     list);
1265                if ((aspace == handler->aspace) &&
1266                    (address >= handler->start) &&
1267                    (address < handler->end)) {
1268                        if (!handler->num_errors)
1269                                handler->first_error = address;
1270                        if (handler->num_errors != UINT_MAX)
1271                                handler->num_errors++;
1272                        handler_triggered = 1;
1273                }
1274        }
1275
1276        if (!handler_triggered)
1277                dev_err(bridge->parent,
1278                        "Unhandled VME access error at address 0x%llx\n",
1279                        address);
1280}
1281EXPORT_SYMBOL(vme_bus_error_handler);
1282
1283struct vme_error_handler *vme_register_error_handler(
1284        struct vme_bridge *bridge, u32 aspace,
1285        unsigned long long address, size_t len)
1286{
1287        struct vme_error_handler *handler;
1288
1289        handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
1290        if (!handler)
1291                return NULL;
1292
1293        handler->aspace = aspace;
1294        handler->start = address;
1295        handler->end = address + len;
1296        handler->num_errors = 0;
1297        handler->first_error = 0;
1298        list_add_tail(&handler->list, &bridge->vme_error_handlers);
1299
1300        return handler;
1301}
1302EXPORT_SYMBOL(vme_register_error_handler);
1303
/*
 * Remove a previously registered error handler from its bridge's list and
 * free it. The handler must have been returned by
 * vme_register_error_handler().
 */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
        list_del(&handler->list);
        kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
1310
1311void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
1312{
1313        void (*call)(int, int, void *);
1314        void *priv_data;
1315
1316        call = bridge->irq[level - 1].callback[statid].func;
1317        priv_data = bridge->irq[level - 1].callback[statid].priv_data;
1318        if (call)
1319                call(level, statid, priv_data);
1320        else
1321                printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
1322                       level, statid);
1323}
1324EXPORT_SYMBOL(vme_irq_handler);
1325
/**
 * vme_irq_request - Request a specific VME interrupt.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @level: Interrupt priority being requested.
 * @statid: Interrupt vector being requested.
 * @callback: Pointer to callback function called when VME interrupt/vector
 *            received.
 * @priv_data: Generic pointer that will be passed to the callback function.
 *
 * Request callback to be attached as a handler for VME interrupts with provided
 * level and statid.
 *
 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
 *         function is not supported, -EBUSY if the level/statid combination is
 *         already in use. Hardware specific errors also possible.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
        void (*callback)(int, int, void *),
        void *priv_data)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (!bridge) {
                printk(KERN_ERR "Can't find VME bus\n");
                return -EINVAL;
        }

        /* VME defines interrupt levels 1-7 */
        if ((level < 1) || (level > 7)) {
                printk(KERN_ERR "Invalid interrupt level\n");
                return -EINVAL;
        }

        /*
         * NOTE(review): statid is not range-checked here; callers are
         * presumably trusted to pass a valid vector - confirm against the
         * callback[] array size in vme_bridge.h.
         */
        if (!bridge->irq_set) {
                printk(KERN_ERR "Configuring interrupts not supported\n");
                return -EINVAL;
        }

        /* irq_mtx protects the callback table and per-level counts */
        mutex_lock(&bridge->irq_mtx);

        if (bridge->irq[level - 1].callback[statid].func) {
                mutex_unlock(&bridge->irq_mtx);
                printk(KERN_WARNING "VME Interrupt already taken\n");
                return -EBUSY;
        }

        bridge->irq[level - 1].count++;
        bridge->irq[level - 1].callback[statid].priv_data = priv_data;
        bridge->irq[level - 1].callback[statid].func = callback;

        /* Enable IRQ level */
        bridge->irq_set(bridge, level, 1, 1);

        mutex_unlock(&bridge->irq_mtx);

        return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1384
/**
 * vme_irq_free - Free a VME interrupt.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @level: Interrupt priority of interrupt being freed.
 * @statid: Interrupt vector of interrupt being freed.
 *
 * Remove previously attached callback from VME interrupt priority/vector.
 *
 * NOTE(review): no check is made that a callback was actually attached for
 * this level/statid; freeing an unattached vector would corrupt the per-level
 * count - confirm callers always pair this with vme_irq_request().
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (!bridge) {
                printk(KERN_ERR "Can't find VME bus\n");
                return;
        }

        /* VME defines interrupt levels 1-7 */
        if ((level < 1) || (level > 7)) {
                printk(KERN_ERR "Invalid interrupt level\n");
                return;
        }

        if (!bridge->irq_set) {
                printk(KERN_ERR "Configuring interrupts not supported\n");
                return;
        }

        /* irq_mtx protects the callback table and per-level counts */
        mutex_lock(&bridge->irq_mtx);

        bridge->irq[level - 1].count--;

        /* Disable IRQ level if no more interrupts attached at this level*/
        if (bridge->irq[level - 1].count == 0)
                bridge->irq_set(bridge, level, 0, 1);

        bridge->irq[level - 1].callback[statid].func = NULL;
        bridge->irq[level - 1].callback[statid].priv_data = NULL;

        mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1427
1428/**
1429 * vme_irq_generate - Generate VME interrupt.
1430 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1431 * @level: Interrupt priority at which to assert the interrupt.
1432 * @statid: Interrupt vector to associate with the interrupt.
1433 *
1434 * Generate a VME interrupt of the provided level and with the provided
1435 * statid.
1436 *
1437 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
1438 *         function is not supported. Hardware specific errors also possible.
1439 */
1440int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1441{
1442        struct vme_bridge *bridge;
1443
1444        bridge = vdev->bridge;
1445        if (!bridge) {
1446                printk(KERN_ERR "Can't find VME bus\n");
1447                return -EINVAL;
1448        }
1449
1450        if ((level < 1) || (level > 7)) {
1451                printk(KERN_WARNING "Invalid interrupt level\n");
1452                return -EINVAL;
1453        }
1454
1455        if (!bridge->irq_generate) {
1456                printk(KERN_WARNING "Interrupt generation not supported\n");
1457                return -EINVAL;
1458        }
1459
1460        return bridge->irq_generate(bridge, level, statid);
1461}
1462EXPORT_SYMBOL(vme_irq_generate);
1463
/**
 * vme_lm_request - Request a VME location monitor
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 *
 * Allocate a location monitor resource to the driver. A location monitor
 * allows the driver to monitor accesses to a contiguous number of
 * addresses on the VME bus.
 *
 * Return: Pointer to a VME resource on success or NULL on failure.
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
        struct vme_bridge *bridge;
        struct list_head *lm_pos = NULL;
        struct vme_lm_resource *allocated_lm = NULL;
        struct vme_lm_resource *lm = NULL;
        struct vme_resource *resource = NULL;

        bridge = vdev->bridge;
        if (!bridge) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through LM resources */
        list_for_each(lm_pos, &bridge->lm_resources) {
                lm = list_entry(lm_pos,
                        struct vme_lm_resource, list);
                if (!lm) {
                        /*
                         * NOTE(review): list_entry() is container_of() and
                         * cannot yield NULL here; this check looks dead.
                         */
                        printk(KERN_ERR "Registered NULL Location Monitor resource\n");
                        continue;
                }

                /* Find an unlocked controller */
                mutex_lock(&lm->mtx);
                if (lm->locked == 0) {
                        /*
                         * Claim the monitor before dropping its mutex so no
                         * concurrent requester can grab it.
                         */
                        lm->locked = 1;
                        mutex_unlock(&lm->mtx);
                        allocated_lm = lm;
                        break;
                }
                mutex_unlock(&lm->mtx);
        }

        /* Check to see if we found a resource */
        if (!allocated_lm)
                goto err_lm;

        resource = kmalloc(sizeof(*resource), GFP_KERNEL);
        if (!resource)
                goto err_alloc;

        resource->type = VME_LM;
        resource->entry = &allocated_lm->list;

        return resource;

err_alloc:
        /* Unlock image */
        mutex_lock(&lm->mtx);
        lm->locked = 0;
        mutex_unlock(&lm->mtx);
err_lm:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
1531
1532/**
1533 * vme_lm_count - Determine number of VME Addresses monitored
1534 * @resource: Pointer to VME location monitor resource.
1535 *
1536 * The number of contiguous addresses monitored is hardware dependent.
1537 * Return the number of contiguous addresses monitored by the
1538 * location monitor.
1539 *
1540 * Return: Count of addresses monitored or -EINVAL when provided with an
1541 *         invalid location monitor resource.
1542 */
1543int vme_lm_count(struct vme_resource *resource)
1544{
1545        struct vme_lm_resource *lm;
1546
1547        if (resource->type != VME_LM) {
1548                printk(KERN_ERR "Not a Location Monitor resource\n");
1549                return -EINVAL;
1550        }
1551
1552        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1553
1554        return lm->monitors;
1555}
1556EXPORT_SYMBOL(vme_lm_count);
1557
1558/**
1559 * vme_lm_set - Configure location monitor
1560 * @resource: Pointer to VME location monitor resource.
1561 * @lm_base: Base address to monitor.
1562 * @aspace: VME address space to monitor.
1563 * @cycle: VME bus cycle type to monitor.
1564 *
1565 * Set the base address, address space and cycle type of accesses to be
1566 * monitored by the location monitor.
1567 *
1568 * Return: Zero on success, -EINVAL when provided with an invalid location
1569 *         monitor resource or function is not supported. Hardware specific
1570 *         errors may also be returned.
1571 */
1572int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1573        u32 aspace, u32 cycle)
1574{
1575        struct vme_bridge *bridge = find_bridge(resource);
1576        struct vme_lm_resource *lm;
1577
1578        if (resource->type != VME_LM) {
1579                printk(KERN_ERR "Not a Location Monitor resource\n");
1580                return -EINVAL;
1581        }
1582
1583        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1584
1585        if (!bridge->lm_set) {
1586                printk(KERN_ERR "vme_lm_set not supported\n");
1587                return -EINVAL;
1588        }
1589
1590        return bridge->lm_set(lm, lm_base, aspace, cycle);
1591}
1592EXPORT_SYMBOL(vme_lm_set);
1593
1594/**
1595 * vme_lm_get - Retrieve location monitor settings
1596 * @resource: Pointer to VME location monitor resource.
1597 * @lm_base: Pointer used to output the base address monitored.
1598 * @aspace: Pointer used to output the address space monitored.
1599 * @cycle: Pointer used to output the VME bus cycle type monitored.
1600 *
1601 * Retrieve the base address, address space and cycle type of accesses to
1602 * be monitored by the location monitor.
1603 *
1604 * Return: Zero on success, -EINVAL when provided with an invalid location
1605 *         monitor resource or function is not supported. Hardware specific
1606 *         errors may also be returned.
1607 */
1608int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1609        u32 *aspace, u32 *cycle)
1610{
1611        struct vme_bridge *bridge = find_bridge(resource);
1612        struct vme_lm_resource *lm;
1613
1614        if (resource->type != VME_LM) {
1615                printk(KERN_ERR "Not a Location Monitor resource\n");
1616                return -EINVAL;
1617        }
1618
1619        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1620
1621        if (!bridge->lm_get) {
1622                printk(KERN_ERR "vme_lm_get not supported\n");
1623                return -EINVAL;
1624        }
1625
1626        return bridge->lm_get(lm, lm_base, aspace, cycle);
1627}
1628EXPORT_SYMBOL(vme_lm_get);
1629
1630/**
1631 * vme_lm_attach - Provide callback for location monitor address
1632 * @resource: Pointer to VME location monitor resource.
1633 * @monitor: Offset to which callback should be attached.
1634 * @callback: Pointer to callback function called when triggered.
1635 * @data: Generic pointer that will be passed to the callback function.
1636 *
1637 * Attach a callback to the specificed offset into the location monitors
1638 * monitored addresses. A generic pointer is provided to allow data to be
1639 * passed to the callback when called.
1640 *
1641 * Return: Zero on success, -EINVAL when provided with an invalid location
1642 *         monitor resource or function is not supported. Hardware specific
1643 *         errors may also be returned.
1644 */
1645int vme_lm_attach(struct vme_resource *resource, int monitor,
1646        void (*callback)(void *), void *data)
1647{
1648        struct vme_bridge *bridge = find_bridge(resource);
1649        struct vme_lm_resource *lm;
1650
1651        if (resource->type != VME_LM) {
1652                printk(KERN_ERR "Not a Location Monitor resource\n");
1653                return -EINVAL;
1654        }
1655
1656        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1657
1658        if (!bridge->lm_attach) {
1659                printk(KERN_ERR "vme_lm_attach not supported\n");
1660                return -EINVAL;
1661        }
1662
1663        return bridge->lm_attach(lm, monitor, callback, data);
1664}
1665EXPORT_SYMBOL(vme_lm_attach);
1666
1667/**
1668 * vme_lm_detach - Remove callback for location monitor address
1669 * @resource: Pointer to VME location monitor resource.
1670 * @monitor: Offset to which callback should be removed.
1671 *
1672 * Remove the callback associated with the specificed offset into the
1673 * location monitors monitored addresses.
1674 *
1675 * Return: Zero on success, -EINVAL when provided with an invalid location
1676 *         monitor resource or function is not supported. Hardware specific
1677 *         errors may also be returned.
1678 */
1679int vme_lm_detach(struct vme_resource *resource, int monitor)
1680{
1681        struct vme_bridge *bridge = find_bridge(resource);
1682        struct vme_lm_resource *lm;
1683
1684        if (resource->type != VME_LM) {
1685                printk(KERN_ERR "Not a Location Monitor resource\n");
1686                return -EINVAL;
1687        }
1688
1689        lm = list_entry(resource->entry, struct vme_lm_resource, list);
1690
1691        if (!bridge->lm_detach) {
1692                printk(KERN_ERR "vme_lm_detach not supported\n");
1693                return -EINVAL;
1694        }
1695
1696        return bridge->lm_detach(lm, monitor);
1697}
1698EXPORT_SYMBOL(vme_lm_detach);
1699
/**
 * vme_lm_free - Free allocated VME location monitor
 * @resource: Pointer to VME location monitor resource.
 *
 * Free allocation of a VME location monitor.
 *
 * WARNING: This function currently expects that any callbacks that have
 *          been attached to the location monitor have been removed.
 */
void vme_lm_free(struct vme_resource *resource)
{
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        mutex_lock(&lm->mtx);

        /* XXX
         * Check to see that there aren't any callbacks still attached, if
         * there are we should probably be detaching them!
         */

        /* Mark the monitor as available for reallocation. */
        lm->locked = 0;

        mutex_unlock(&lm->mtx);

        kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
1737
1738/**
1739 * vme_slot_num - Retrieve slot ID
1740 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1741 *
1742 * Retrieve the slot ID associated with the provided VME device.
1743 *
1744 * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
1745 *         or the function is not supported. Hardware specific errors may also
1746 *         be returned.
1747 */
1748int vme_slot_num(struct vme_dev *vdev)
1749{
1750        struct vme_bridge *bridge;
1751
1752        bridge = vdev->bridge;
1753        if (!bridge) {
1754                printk(KERN_ERR "Can't find VME bus\n");
1755                return -EINVAL;
1756        }
1757
1758        if (!bridge->slot_get) {
1759                printk(KERN_WARNING "vme_slot_num not supported\n");
1760                return -EINVAL;
1761        }
1762
1763        return bridge->slot_get(bridge);
1764}
1765EXPORT_SYMBOL(vme_slot_num);
1766
1767/**
1768 * vme_bus_num - Retrieve bus number
1769 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1770 *
1771 * Retrieve the bus enumeration associated with the provided VME device.
1772 *
1773 * Return: The bus number on success, -EINVAL if VME bridge cannot be
1774 *         determined.
1775 */
1776int vme_bus_num(struct vme_dev *vdev)
1777{
1778        struct vme_bridge *bridge;
1779
1780        bridge = vdev->bridge;
1781        if (!bridge) {
1782                pr_err("Can't find VME bus\n");
1783                return -EINVAL;
1784        }
1785
1786        return bridge->num;
1787}
1788EXPORT_SYMBOL(vme_bus_num);
1789
1790/* - Bridge Registration --------------------------------------------------- */
1791
/* Release callback for the embedded struct device: frees the vme_dev. */
static void vme_dev_release(struct device *dev)
{
        struct vme_dev *vdev = dev_to_vme_dev(dev);

        kfree(vdev);
}
1796
1797/* Common bridge initialization */
1798struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
1799{
1800        INIT_LIST_HEAD(&bridge->vme_error_handlers);
1801        INIT_LIST_HEAD(&bridge->master_resources);
1802        INIT_LIST_HEAD(&bridge->slave_resources);
1803        INIT_LIST_HEAD(&bridge->dma_resources);
1804        INIT_LIST_HEAD(&bridge->lm_resources);
1805        mutex_init(&bridge->irq_mtx);
1806
1807        return bridge;
1808}
1809EXPORT_SYMBOL(vme_init_bridge);
1810
1811int vme_register_bridge(struct vme_bridge *bridge)
1812{
1813        int i;
1814        int ret = -1;
1815
1816        mutex_lock(&vme_buses_lock);
1817        for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1818                if ((vme_bus_numbers & (1 << i)) == 0) {
1819                        vme_bus_numbers |= (1 << i);
1820                        bridge->num = i;
1821                        INIT_LIST_HEAD(&bridge->devices);
1822                        list_add_tail(&bridge->bus_list, &vme_bus_list);
1823                        ret = 0;
1824                        break;
1825                }
1826        }
1827        mutex_unlock(&vme_buses_lock);
1828
1829        return ret;
1830}
1831EXPORT_SYMBOL(vme_register_bridge);
1832
1833void vme_unregister_bridge(struct vme_bridge *bridge)
1834{
1835        struct vme_dev *vdev;
1836        struct vme_dev *tmp;
1837
1838        mutex_lock(&vme_buses_lock);
1839        vme_bus_numbers &= ~(1 << bridge->num);
1840        list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
1841                list_del(&vdev->drv_list);
1842                list_del(&vdev->bridge_list);
1843                device_unregister(&vdev->dev);
1844        }
1845        list_del(&bridge->bus_list);
1846        mutex_unlock(&vme_buses_lock);
1847}
1848EXPORT_SYMBOL(vme_unregister_bridge);
1849
1850/* - Driver Registration --------------------------------------------------- */
1851
/*
 * Create up to @ndevs candidate devices for @drv on @bridge. Each device
 * is registered with the driver core; vme_bus_match() clears
 * dev.platform_data when the driver's match() rejects the device, which
 * is why platform_data is re-checked after device_register() below.
 * Returns 0 on success; on error all devices created so far (for any
 * bridge) are torn down via drv->devices.
 */
static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		/* platform_data carries the owning driver; used by match/probe */
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		/* e.g. "mydrv.0-3": <driver>.<bus>-<device index> */
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);

		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;

		if (vdev->dev.platform_data) {
			/* Driver matched: track the device on both lists */
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			/* match() rejected it; drop the candidate device */
			device_unregister(&vdev->dev);
	}
	return 0;

err_reg:
	/* device_register() failed: release core's reference; release()
	 * callback frees vdev */
	put_device(&vdev->dev);
err_devalloc:
	/* Roll back every device successfully created for this driver */
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}
1897
/*
 * Walk every registered bridge and create @drv's candidate devices on
 * each, under vme_buses_lock. Stops at the first bus that fails;
 * __vme_register_driver_bus() has already rolled back on that path.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}
1919
1920/**
1921 * vme_register_driver - Register a VME driver
1922 * @drv: Pointer to VME driver structure to register.
1923 * @ndevs: Maximum number of devices to allow to be enumerated.
1924 *
1925 * Register a VME device driver with the VME subsystem.
1926 *
1927 * Return: Zero on success, error value on registration failure.
1928 */
1929int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1930{
1931        int err;
1932
1933        drv->driver.name = drv->name;
1934        drv->driver.bus = &vme_bus_type;
1935        INIT_LIST_HEAD(&drv->devices);
1936
1937        err = driver_register(&drv->driver);
1938        if (err)
1939                return err;
1940
1941        err = __vme_register_driver(drv, ndevs);
1942        if (err)
1943                driver_unregister(&drv->driver);
1944
1945        return err;
1946}
1947EXPORT_SYMBOL(vme_register_driver);
1948
1949/**
1950 * vme_unregister_driver - Unregister a VME driver
1951 * @drv: Pointer to VME driver structure to unregister.
1952 *
1953 * Unregister a VME device driver from the VME subsystem.
1954 */
1955void vme_unregister_driver(struct vme_driver *drv)
1956{
1957        struct vme_dev *dev, *dev_tmp;
1958
1959        mutex_lock(&vme_buses_lock);
1960        list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1961                list_del(&dev->drv_list);
1962                list_del(&dev->bridge_list);
1963                device_unregister(&dev->dev);
1964        }
1965        mutex_unlock(&vme_buses_lock);
1966
1967        driver_unregister(&drv->driver);
1968}
1969EXPORT_SYMBOL(vme_unregister_driver);
1970
1971/* - Bus Registration ------------------------------------------------------ */
1972
/*
 * Bus match callback. A device belongs to a driver only if it was
 * created for that driver (platform_data points at it) AND the driver's
 * match() accepts it. On rejection platform_data is cleared, which
 * __vme_register_driver_bus() uses to detect the unmatched device and
 * unregister it.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		/* Rejected: signal the creator that this candidate failed */
		dev->platform_data = NULL;
	}
	return 0;
}
1989
1990static int vme_bus_probe(struct device *dev)
1991{
1992        struct vme_driver *driver;
1993        struct vme_dev *vdev = dev_to_vme_dev(dev);
1994
1995        driver = dev->platform_data;
1996        if (driver->probe)
1997                return driver->probe(vdev);
1998
1999        return -ENODEV;
2000}
2001
2002static int vme_bus_remove(struct device *dev)
2003{
2004        struct vme_driver *driver;
2005        struct vme_dev *vdev = dev_to_vme_dev(dev);
2006
2007        driver = dev->platform_data;
2008        if (driver->remove)
2009                return driver->remove(vdev);
2010
2011        return -ENODEV;
2012}
2013
/* The "vme" bus type: binds VME devices to VME drivers via the callbacks
 * above. Exported so bridge/driver modules can reference the bus. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
2021
/* Register the VME bus type with the driver core. Run at subsys_initcall
 * time so the bus exists before any bridge or device driver initcalls. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);
2027