/* linux/drivers/staging/vme/devices/vme_user.c */
   1/*
   2 * VMEbus User access driver
   3 *
   4 * Author: Martyn Welch <martyn.welch@ge.com>
   5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by:
   8 *   Tom Armistead and Ajit Prem
   9 *     Copyright 2004 Motorola Inc.
  10 *
  11 *
  12 * This program is free software; you can redistribute  it and/or modify it
  13 * under  the terms of  the GNU General  Public License as published by the
  14 * Free Software Foundation;  either version 2 of the  License, or (at your
  15 * option) any later version.
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/cdev.h>
  21#include <linux/delay.h>
  22#include <linux/device.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/errno.h>
  25#include <linux/init.h>
  26#include <linux/ioctl.h>
  27#include <linux/kernel.h>
  28#include <linux/mm.h>
  29#include <linux/module.h>
  30#include <linux/pagemap.h>
  31#include <linux/pci.h>
  32#include <linux/mutex.h>
  33#include <linux/slab.h>
  34#include <linux/spinlock.h>
  35#include <linux/syscalls.h>
  36#include <linux/types.h>
  37
  38#include <linux/io.h>
  39#include <linux/uaccess.h>
  40#include <linux/vme.h>
  41
  42#include "vme_user.h"
  43
  44static DEFINE_MUTEX(vme_user_mutex);
  45static const char driver_name[] = "vme_user";
  46
  47static int bus[VME_USER_BUS_MAX];
  48static unsigned int bus_num;
  49
  50/* Currently Documentation/devices.txt defines the following for VME:
  51 *
  52 * 221 char     VME bus
  53 *                0 = /dev/bus/vme/m0           First master image
  54 *                1 = /dev/bus/vme/m1           Second master image
  55 *                2 = /dev/bus/vme/m2           Third master image
  56 *                3 = /dev/bus/vme/m3           Fourth master image
  57 *                4 = /dev/bus/vme/s0           First slave image
  58 *                5 = /dev/bus/vme/s1           Second slave image
  59 *                6 = /dev/bus/vme/s2           Third slave image
  60 *                7 = /dev/bus/vme/s3           Fourth slave image
  61 *                8 = /dev/bus/vme/ctl          Control
  62 *
  63 *              It is expected that all VME bus drivers will use the
  64 *              same interface.  For interface documentation see
  65 *              http://www.vmelinux.org/.
  66 *
  67 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
  68 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
  69 * We'll run with this for now as far as possible, however it probably makes
  70 * sense to get rid of the old mappings and just do everything dynamically.
  71 *
  72 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
  73 * defined above and try to support at least some of the interface from
  74 * http://www.vmelinux.org/ as an alternative the driver can be written
  75 * providing a saner interface later.
  76 *
  77 * The vmelinux.org driver never supported slave images, the devices reserved
  78 * for slaves were repurposed to support all 8 master images on the UniverseII!
  79 * We shall support 4 masters and 4 slaves with this driver.
  80 */
  81#define VME_MAJOR       221     /* VME Major Device Number */
  82#define VME_DEVS        9       /* Number of dev entries */
  83
  84#define MASTER_MINOR    0
  85#define MASTER_MAX      3
  86#define SLAVE_MINOR     4
  87#define SLAVE_MAX       7
  88#define CONTROL_MINOR   8
  89
  90#define PCI_BUF_SIZE  0x20000   /* Size of one slave image buffer */
  91
/*
 * Structure to handle image related parameters.
 *
 * 'mutex' serialises open/release/read/write/llseek access to a single
 * image; kern_buf/pci_buf/size_buf describe the bounce buffer used for
 * small transfers.
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;		/* Number of current users */
};
static struct image_desc image[VME_DEVS];
 105
/*
 * Driver-wide statistics counters.  In this file only 'ioctls' is ever
 * incremented (in vme_user_ioctl()); the remaining fields appear to be
 * carried over from the original driver — confirm before relying on them.
 */
struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaErrors;
	unsigned long timeouts;
	unsigned long external;
};
static struct driver_stats statistics;
 117
 118static struct cdev *vme_user_cdev;              /* Character device */
 119static struct class *vme_user_sysfs_class;      /* Sysfs class */
 120static struct vme_dev *vme_user_bridge;         /* Pointer to user device */
 121
 122
/* Minor number -> image type lookup: minors 0-3 are master windows,
 * 4-7 are slave windows, 8 is the control device.
 */
static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};
 129
 130
 131static int vme_user_open(struct inode *, struct file *);
 132static int vme_user_release(struct inode *, struct file *);
 133static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
 134static ssize_t vme_user_write(struct file *, const char __user *, size_t,
 135        loff_t *);
 136static loff_t vme_user_llseek(struct file *, loff_t, int);
 137static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
 138
 139static int vme_user_match(struct vme_dev *);
 140static int vme_user_probe(struct vme_dev *);
 141static int vme_user_remove(struct vme_dev *);
 142
/* File operations shared by all nine minors; the ioctl path is
 * serialised through vme_user_mutex in vme_user_unlocked_ioctl().
 */
static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
};
 151
 152
 153/*
 154 * Reset all the statistic counters
 155 */
 156static void reset_counters(void)
 157{
 158        statistics.reads = 0;
 159        statistics.writes = 0;
 160        statistics.ioctls = 0;
 161        statistics.irqs = 0;
 162        statistics.berrs = 0;
 163        statistics.dmaErrors = 0;
 164        statistics.timeouts = 0;
 165}
 166
 167static int vme_user_open(struct inode *inode, struct file *file)
 168{
 169        int err;
 170        unsigned int minor = MINOR(inode->i_rdev);
 171
 172        mutex_lock(&image[minor].mutex);
 173        /* Allow device to be opened if a resource is needed and allocated. */
 174        if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
 175                pr_err("No resources allocated for device\n");
 176                err = -EINVAL;
 177                goto err_res;
 178        }
 179
 180        /* Increment user count */
 181        image[minor].users++;
 182
 183        mutex_unlock(&image[minor].mutex);
 184
 185        return 0;
 186
 187err_res:
 188        mutex_unlock(&image[minor].mutex);
 189
 190        return err;
 191}
 192
 193static int vme_user_release(struct inode *inode, struct file *file)
 194{
 195        unsigned int minor = MINOR(inode->i_rdev);
 196
 197        mutex_lock(&image[minor].mutex);
 198
 199        /* Decrement user count */
 200        image[minor].users--;
 201
 202        mutex_unlock(&image[minor].mutex);
 203
 204        return 0;
 205}
 206
/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers.
 */
 213static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
 214        loff_t *ppos)
 215{
 216        ssize_t retval;
 217        ssize_t copied = 0;
 218
 219        if (count <= image[minor].size_buf) {
 220                /* We copy to kernel buffer */
 221                copied = vme_master_read(image[minor].resource,
 222                        image[minor].kern_buf, count, *ppos);
 223                if (copied < 0)
 224                        return (int)copied;
 225
 226                retval = __copy_to_user(buf, image[minor].kern_buf,
 227                        (unsigned long)copied);
 228                if (retval != 0) {
 229                        copied = (copied - retval);
 230                        pr_info("User copy failed\n");
 231                        return -EINVAL;
 232                }
 233
 234        } else {
 235                /* XXX Need to write this */
 236                pr_info("Currently don't support large transfers\n");
 237                /* Map in pages from userspace */
 238
 239                /* Call vme_master_read to do the transfer */
 240                return -EINVAL;
 241        }
 242
 243        return copied;
 244}
 245
 246/*
 247 * We are going to alloc a page during init per window for small transfers.
 248 * Small transfers will go user space -> buffer -> VME. Larger (more than a
 249 * page) transfers will lock the user space buffer into memory and then
 250 * transfer the data directly from the user space buffers out to VME.
 251 */
 252static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
 253        size_t count, loff_t *ppos)
 254{
 255        ssize_t retval;
 256        ssize_t copied = 0;
 257
 258        if (count <= image[minor].size_buf) {
 259                retval = __copy_from_user(image[minor].kern_buf, buf,
 260                        (unsigned long)count);
 261                if (retval != 0)
 262                        copied = (copied - retval);
 263                else
 264                        copied = count;
 265
 266                copied = vme_master_write(image[minor].resource,
 267                        image[minor].kern_buf, copied, *ppos);
 268        } else {
 269                /* XXX Need to write this */
 270                pr_info("Currently don't support large transfers\n");
 271                /* Map in pages from userspace */
 272
 273                /* Call vme_master_write to do the transfer */
 274                return -EINVAL;
 275        }
 276
 277        return copied;
 278}
 279
 280static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
 281        size_t count, loff_t *ppos)
 282{
 283        void *image_ptr;
 284        ssize_t retval;
 285
 286        image_ptr = image[minor].kern_buf + *ppos;
 287
 288        retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
 289        if (retval != 0) {
 290                retval = (count - retval);
 291                pr_warn("Partial copy to userspace\n");
 292        } else
 293                retval = count;
 294
 295        /* Return number of bytes successfully read */
 296        return retval;
 297}
 298
 299static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
 300        size_t count, loff_t *ppos)
 301{
 302        void *image_ptr;
 303        size_t retval;
 304
 305        image_ptr = image[minor].kern_buf + *ppos;
 306
 307        retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
 308        if (retval != 0) {
 309                retval = (count - retval);
 310                pr_warn("Partial copy to userspace\n");
 311        } else
 312                retval = count;
 313
 314        /* Return number of bytes successfully read */
 315        return retval;
 316}
 317
 318static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
 319                        loff_t *ppos)
 320{
 321        unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
 322        ssize_t retval;
 323        size_t image_size;
 324        size_t okcount;
 325
 326        if (minor == CONTROL_MINOR)
 327                return 0;
 328
 329        mutex_lock(&image[minor].mutex);
 330
 331        /* XXX Do we *really* want this helper - we can use vme_*_get ? */
 332        image_size = vme_get_size(image[minor].resource);
 333
 334        /* Ensure we are starting at a valid location */
 335        if ((*ppos < 0) || (*ppos > (image_size - 1))) {
 336                mutex_unlock(&image[minor].mutex);
 337                return 0;
 338        }
 339
 340        /* Ensure not reading past end of the image */
 341        if (*ppos + count > image_size)
 342                okcount = image_size - *ppos;
 343        else
 344                okcount = count;
 345
 346        switch (type[minor]) {
 347        case MASTER_MINOR:
 348                retval = resource_to_user(minor, buf, okcount, ppos);
 349                break;
 350        case SLAVE_MINOR:
 351                retval = buffer_to_user(minor, buf, okcount, ppos);
 352                break;
 353        default:
 354                retval = -EINVAL;
 355        }
 356
 357        mutex_unlock(&image[minor].mutex);
 358        if (retval > 0)
 359                *ppos += retval;
 360
 361        return retval;
 362}
 363
 364static ssize_t vme_user_write(struct file *file, const char __user *buf,
 365                        size_t count, loff_t *ppos)
 366{
 367        unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
 368        ssize_t retval;
 369        size_t image_size;
 370        size_t okcount;
 371
 372        if (minor == CONTROL_MINOR)
 373                return 0;
 374
 375        mutex_lock(&image[minor].mutex);
 376
 377        image_size = vme_get_size(image[minor].resource);
 378
 379        /* Ensure we are starting at a valid location */
 380        if ((*ppos < 0) || (*ppos > (image_size - 1))) {
 381                mutex_unlock(&image[minor].mutex);
 382                return 0;
 383        }
 384
 385        /* Ensure not reading past end of the image */
 386        if (*ppos + count > image_size)
 387                okcount = image_size - *ppos;
 388        else
 389                okcount = count;
 390
 391        switch (type[minor]) {
 392        case MASTER_MINOR:
 393                retval = resource_from_user(minor, buf, okcount, ppos);
 394                break;
 395        case SLAVE_MINOR:
 396                retval = buffer_from_user(minor, buf, okcount, ppos);
 397                break;
 398        default:
 399                retval = -EINVAL;
 400        }
 401
 402        mutex_unlock(&image[minor].mutex);
 403
 404        if (retval > 0)
 405                *ppos += retval;
 406
 407        return retval;
 408}
 409
 410static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
 411{
 412        loff_t absolute = -1;
 413        unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
 414        size_t image_size;
 415
 416        if (minor == CONTROL_MINOR)
 417                return -EINVAL;
 418
 419        mutex_lock(&image[minor].mutex);
 420        image_size = vme_get_size(image[minor].resource);
 421
 422        switch (whence) {
 423        case SEEK_SET:
 424                absolute = off;
 425                break;
 426        case SEEK_CUR:
 427                absolute = file->f_pos + off;
 428                break;
 429        case SEEK_END:
 430                absolute = image_size + off;
 431                break;
 432        default:
 433                mutex_unlock(&image[minor].mutex);
 434                return -EINVAL;
 435                break;
 436        }
 437
 438        if ((absolute < 0) || (absolute >= image_size)) {
 439                mutex_unlock(&image[minor].mutex);
 440                return -EINVAL;
 441        }
 442
 443        file->f_pos = absolute;
 444
 445        mutex_unlock(&image[minor].mutex);
 446
 447        return absolute;
 448}
 449
 450/*
 451 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 452 * are most certainly wrong as the effectively push the registers layout
 453 * through to user space. Given that the VME core can handle multiple bridges,
 454 * with different register layouts this is most certainly not the way to go.
 455 *
 456 * We aren't using the structures defined in the Motorola driver either - these
 457 * are also quite low level, however we should use the definitions that have
 458 * already been defined.
 459 */
 460static int vme_user_ioctl(struct inode *inode, struct file *file,
 461        unsigned int cmd, unsigned long arg)
 462{
 463        struct vme_master master;
 464        struct vme_slave slave;
 465        struct vme_irq_id irq_req;
 466        unsigned long copied;
 467        unsigned int minor = MINOR(inode->i_rdev);
 468        int retval;
 469        dma_addr_t pci_addr;
 470        void __user *argp = (void __user *)arg;
 471
 472        statistics.ioctls++;
 473
 474        switch (type[minor]) {
 475        case CONTROL_MINOR:
 476                switch (cmd) {
 477                case VME_IRQ_GEN:
 478                        copied = copy_from_user(&irq_req, argp,
 479                                                sizeof(struct vme_irq_id));
 480                        if (copied != 0) {
 481                                pr_warn("Partial copy from userspace\n");
 482                                return -EFAULT;
 483                        }
 484
 485                        retval = vme_irq_generate(vme_user_bridge,
 486                                                  irq_req.level,
 487                                                  irq_req.statid);
 488
 489                        return retval;
 490                }
 491                break;
 492        case MASTER_MINOR:
 493                switch (cmd) {
 494                case VME_GET_MASTER:
 495                        memset(&master, 0, sizeof(struct vme_master));
 496
 497                        /* XXX  We do not want to push aspace, cycle and width
 498                         *      to userspace as they are
 499                         */
 500                        retval = vme_master_get(image[minor].resource,
 501                                &master.enable, &master.vme_addr,
 502                                &master.size, &master.aspace,
 503                                &master.cycle, &master.dwidth);
 504
 505                        copied = copy_to_user(argp, &master,
 506                                sizeof(struct vme_master));
 507                        if (copied != 0) {
 508                                pr_warn("Partial copy to userspace\n");
 509                                return -EFAULT;
 510                        }
 511
 512                        return retval;
 513                        break;
 514
 515                case VME_SET_MASTER:
 516
 517                        copied = copy_from_user(&master, argp, sizeof(master));
 518                        if (copied != 0) {
 519                                pr_warn("Partial copy from userspace\n");
 520                                return -EFAULT;
 521                        }
 522
 523                        /* XXX  We do not want to push aspace, cycle and width
 524                         *      to userspace as they are
 525                         */
 526                        return vme_master_set(image[minor].resource,
 527                                master.enable, master.vme_addr, master.size,
 528                                master.aspace, master.cycle, master.dwidth);
 529
 530                        break;
 531                }
 532                break;
 533        case SLAVE_MINOR:
 534                switch (cmd) {
 535                case VME_GET_SLAVE:
 536                        memset(&slave, 0, sizeof(struct vme_slave));
 537
 538                        /* XXX  We do not want to push aspace, cycle and width
 539                         *      to userspace as they are
 540                         */
 541                        retval = vme_slave_get(image[minor].resource,
 542                                &slave.enable, &slave.vme_addr,
 543                                &slave.size, &pci_addr, &slave.aspace,
 544                                &slave.cycle);
 545
 546                        copied = copy_to_user(argp, &slave,
 547                                sizeof(struct vme_slave));
 548                        if (copied != 0) {
 549                                pr_warn("Partial copy to userspace\n");
 550                                return -EFAULT;
 551                        }
 552
 553                        return retval;
 554                        break;
 555
 556                case VME_SET_SLAVE:
 557
 558                        copied = copy_from_user(&slave, argp, sizeof(slave));
 559                        if (copied != 0) {
 560                                pr_warn("Partial copy from userspace\n");
 561                                return -EFAULT;
 562                        }
 563
 564                        /* XXX  We do not want to push aspace, cycle and width
 565                         *      to userspace as they are
 566                         */
 567                        return vme_slave_set(image[minor].resource,
 568                                slave.enable, slave.vme_addr, slave.size,
 569                                image[minor].pci_buf, slave.aspace,
 570                                slave.cycle);
 571
 572                        break;
 573                }
 574                break;
 575        }
 576
 577        return -EINVAL;
 578}
 579
 580static long
 581vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 582{
 583        int ret;
 584
 585        mutex_lock(&vme_user_mutex);
 586        ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
 587        mutex_unlock(&vme_user_mutex);
 588
 589        return ret;
 590}
 591
 592
 593/*
 594 * Unallocate a previously allocated buffer
 595 */
 596static void buf_unalloc(int num)
 597{
 598        if (image[num].kern_buf) {
 599#ifdef VME_DEBUG
 600                pr_debug("UniverseII:Releasing buffer at %p\n",
 601                         image[num].pci_buf);
 602#endif
 603
 604                vme_free_consistent(image[num].resource, image[num].size_buf,
 605                        image[num].kern_buf, image[num].pci_buf);
 606
 607                image[num].kern_buf = NULL;
 608                image[num].pci_buf = 0;
 609                image[num].size_buf = 0;
 610
 611#ifdef VME_DEBUG
 612        } else {
 613                pr_debug("UniverseII: Buffer not allocated\n");
 614#endif
 615        }
 616}
 617
/* Hooks this driver into the VME core; see vme_user_match(),
 * vme_user_probe() and vme_user_remove() below.
 */
static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};
 624
 625
 626static int __init vme_user_init(void)
 627{
 628        int retval = 0;
 629
 630        pr_info("VME User Space Access Driver\n");
 631
 632        if (bus_num == 0) {
 633                pr_err("No cards, skipping registration\n");
 634                retval = -ENODEV;
 635                goto err_nocard;
 636        }
 637
 638        /* Let's start by supporting one bus, we can support more than one
 639         * in future revisions if that ever becomes necessary.
 640         */
 641        if (bus_num > VME_USER_BUS_MAX) {
 642                pr_err("Driver only able to handle %d buses\n",
 643                       VME_USER_BUS_MAX);
 644                bus_num = VME_USER_BUS_MAX;
 645        }
 646
 647        /*
 648         * Here we just register the maximum number of devices we can and
 649         * leave vme_user_match() to allow only 1 to go through to probe().
 650         * This way, if we later want to allow multiple user access devices,
 651         * we just change the code in vme_user_match().
 652         */
 653        retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
 654        if (retval != 0)
 655                goto err_reg;
 656
 657        return retval;
 658
 659err_reg:
 660err_nocard:
 661        return retval;
 662}
 663
 664static int vme_user_match(struct vme_dev *vdev)
 665{
 666        if (vdev->num >= VME_USER_BUS_MAX)
 667                return 0;
 668        return 1;
 669}
 670
 671/*
 672 * In this simple access driver, the old behaviour is being preserved as much
 673 * as practical. We will therefore reserve the buffers and request the images
 674 * here so that we don't have to do it later.
 675 */
 676static int vme_user_probe(struct vme_dev *vdev)
 677{
 678        int i, err;
 679        char name[12];
 680
 681        /* Save pointer to the bridge device */
 682        if (vme_user_bridge != NULL) {
 683                dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
 684                err = -EINVAL;
 685                goto err_dev;
 686        }
 687        vme_user_bridge = vdev;
 688
 689        /* Initialise descriptors */
 690        for (i = 0; i < VME_DEVS; i++) {
 691                image[i].kern_buf = NULL;
 692                image[i].pci_buf = 0;
 693                mutex_init(&image[i].mutex);
 694                image[i].device = NULL;
 695                image[i].resource = NULL;
 696                image[i].users = 0;
 697        }
 698
 699        /* Initialise statistics counters */
 700        reset_counters();
 701
 702        /* Assign major and minor numbers for the driver */
 703        err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
 704                driver_name);
 705        if (err) {
 706                dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
 707                         VME_MAJOR);
 708                goto err_region;
 709        }
 710
 711        /* Register the driver as a char device */
 712        vme_user_cdev = cdev_alloc();
 713        vme_user_cdev->ops = &vme_user_fops;
 714        vme_user_cdev->owner = THIS_MODULE;
 715        err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
 716        if (err) {
 717                dev_warn(&vdev->dev, "cdev_all failed\n");
 718                goto err_char;
 719        }
 720
 721        /* Request slave resources and allocate buffers (128kB wide) */
 722        for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
 723                /* XXX Need to properly request attributes */
 724                /* For ca91cx42 bridge there are only two slave windows
 725                 * supporting A16 addressing, so we request A24 supported
 726                 * by all windows.
 727                 */
 728                image[i].resource = vme_slave_request(vme_user_bridge,
 729                        VME_A24, VME_SCT);
 730                if (image[i].resource == NULL) {
 731                        dev_warn(&vdev->dev,
 732                                 "Unable to allocate slave resource\n");
 733                        goto err_slave;
 734                }
 735                image[i].size_buf = PCI_BUF_SIZE;
 736                image[i].kern_buf = vme_alloc_consistent(image[i].resource,
 737                        image[i].size_buf, &image[i].pci_buf);
 738                if (image[i].kern_buf == NULL) {
 739                        dev_warn(&vdev->dev,
 740                                 "Unable to allocate memory for buffer\n");
 741                        image[i].pci_buf = 0;
 742                        vme_slave_free(image[i].resource);
 743                        err = -ENOMEM;
 744                        goto err_slave;
 745                }
 746        }
 747
 748        /*
 749         * Request master resources allocate page sized buffers for small
 750         * reads and writes
 751         */
 752        for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
 753                /* XXX Need to properly request attributes */
 754                image[i].resource = vme_master_request(vme_user_bridge,
 755                        VME_A32, VME_SCT, VME_D32);
 756                if (image[i].resource == NULL) {
 757                        dev_warn(&vdev->dev,
 758                                 "Unable to allocate master resource\n");
 759                        goto err_master;
 760                }
 761                image[i].size_buf = PCI_BUF_SIZE;
 762                image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
 763                if (image[i].kern_buf == NULL) {
 764                        dev_warn(&vdev->dev,
 765                                 "Unable to allocate memory for master window buffers\n");
 766                        err = -ENOMEM;
 767                        goto err_master_buf;
 768                }
 769        }
 770
 771        /* Create sysfs entries - on udev systems this creates the dev files */
 772        vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
 773        if (IS_ERR(vme_user_sysfs_class)) {
 774                dev_err(&vdev->dev, "Error creating vme_user class.\n");
 775                err = PTR_ERR(vme_user_sysfs_class);
 776                goto err_class;
 777        }
 778
 779        /* Add sysfs Entries */
 780        for (i = 0; i < VME_DEVS; i++) {
 781                int num;
 782                switch (type[i]) {
 783                case MASTER_MINOR:
 784                        sprintf(name, "bus/vme/m%%d");
 785                        break;
 786                case CONTROL_MINOR:
 787                        sprintf(name, "bus/vme/ctl");
 788                        break;
 789                case SLAVE_MINOR:
 790                        sprintf(name, "bus/vme/s%%d");
 791                        break;
 792                default:
 793                        err = -EINVAL;
 794                        goto err_sysfs;
 795                        break;
 796                }
 797
 798                num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
 799                image[i].device = device_create(vme_user_sysfs_class, NULL,
 800                                        MKDEV(VME_MAJOR, i), NULL, name, num);
 801                if (IS_ERR(image[i].device)) {
 802                        dev_info(&vdev->dev, "Error creating sysfs device\n");
 803                        err = PTR_ERR(image[i].device);
 804                        goto err_sysfs;
 805                }
 806        }
 807
 808        return 0;
 809
 810        /* Ensure counter set correcty to destroy all sysfs devices */
 811        i = VME_DEVS;
 812err_sysfs:
 813        while (i > 0) {
 814                i--;
 815                device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
 816        }
 817        class_destroy(vme_user_sysfs_class);
 818
 819        /* Ensure counter set correcty to unalloc all master windows */
 820        i = MASTER_MAX + 1;
 821err_master_buf:
 822        for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
 823                kfree(image[i].kern_buf);
 824err_master:
 825        while (i > MASTER_MINOR) {
 826                i--;
 827                vme_master_free(image[i].resource);
 828        }
 829
 830        /*
 831         * Ensure counter set correcty to unalloc all slave windows and buffers
 832         */
 833        i = SLAVE_MAX + 1;
 834err_slave:
 835        while (i > SLAVE_MINOR) {
 836                i--;
 837                buf_unalloc(i);
 838                vme_slave_free(image[i].resource);
 839        }
 840err_class:
 841        cdev_del(vme_user_cdev);
 842err_char:
 843        unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
 844err_region:
 845err_dev:
 846        return err;
 847}
 848
 849static int vme_user_remove(struct vme_dev *dev)
 850{
 851        int i;
 852
 853        /* Remove sysfs Entries */
 854        for (i = 0; i < VME_DEVS; i++) {
 855                mutex_destroy(&image[i].mutex);
 856                device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
 857        }
 858        class_destroy(vme_user_sysfs_class);
 859
 860        for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
 861                kfree(image[i].kern_buf);
 862                vme_master_free(image[i].resource);
 863        }
 864
 865        for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
 866                vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
 867                buf_unalloc(i);
 868                vme_slave_free(image[i].resource);
 869        }
 870
 871        /* Unregister device driver */
 872        cdev_del(vme_user_cdev);
 873
 874        /* Unregiser the major and minor device numbers */
 875        unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
 876
 877        return 0;
 878}
 879
/* Module unload: detach from the VME core, which triggers
 * vme_user_remove() for the bound device.
 */
static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}
 884
 885
 886MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
 887module_param_array(bus, int, &bus_num, 0);
 888
 889MODULE_DESCRIPTION("VME User Space Access Driver");
 890MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
 891MODULE_LICENSE("GPL");
 892
 893module_init(vme_user_init);
 894module_exit(vme_user_exit);
 895