   1/*
   2 * drivers/uio/uio.c
   3 *
   4 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
   5 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
   6 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
   7 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
   8 *
   9 * Userspace IO
  10 *
  11 * Base Functions
  12 *
  13 * Licensed under the GPLv2 only.
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/poll.h>
  19#include <linux/device.h>
  20#include <linux/slab.h>
  21#include <linux/mm.h>
  22#include <linux/idr.h>
  23#include <linux/sched.h>
  24#include <linux/string.h>
  25#include <linux/kobject.h>
  26#include <linux/cdev.h>
  27#include <linux/uio_driver.h>
  28
/* One char-device minor per UIO device */
#define UIO_MAX_DEVICES         (1U << MINORBITS)

static int uio_major;			/* dynamically allocated major */
static struct cdev *uio_cdev;		/* single cdev covering all minors */
static DEFINE_IDR(uio_idr);		/* maps minor -> struct uio_device */
static const struct file_operations uio_fops;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);
  38
  39/*
  40 * attributes
  41 */
  42
/*
 * Sysfs wrapper for one memory region: ties the kobject behind
 * <dev>/maps/mapX to its struct uio_mem. Allocated in
 * uio_dev_add_attributes(), freed by map_release().
 */
struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)
  48
  49static ssize_t map_name_show(struct uio_mem *mem, char *buf)
  50{
  51        if (unlikely(!mem->name))
  52                mem->name = "";
  53
  54        return sprintf(buf, "%s\n", mem->name);
  55}
  56
/* "addr" attribute: physical/base address of the region (%pa width) */
static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->addr);
}
  61
/* "size" attribute: size of the region in bytes (%pa width) */
static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->size);
}
  66
  67static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
  68{
  69        return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
  70}
  71
/*
 * One sysfs attribute of a mapX directory together with its callbacks.
 * store is declared for symmetry but unused — all files are read-only.
 */
struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};
  77
/* Read-only default attributes created under every mapX kobject */
static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	&offset_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
  94
  95static void map_release(struct kobject *kobj)
  96{
  97        struct uio_map *map = to_map(kobj);
  98        kfree(map);
  99}
 100
 101static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
 102                             char *buf)
 103{
 104        struct uio_map *map = to_map(kobj);
 105        struct uio_mem *mem = map->mem;
 106        struct map_sysfs_entry *entry;
 107
 108        entry = container_of(attr, struct map_sysfs_entry, attr);
 109
 110        if (!entry->show)
 111                return -EIO;
 112
 113        return entry->show(mem, buf);
 114}
 115
static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};

/* kobj_type for mapX directories: read-only attrs, map_release() frees */
static struct kobj_type map_attr_type = {
	.release	= map_release,
	.sysfs_ops	= &map_sysfs_ops,
	.default_attrs	= attrs,
};
 125
/*
 * Sysfs wrapper for one I/O-port region: ties the kobject behind
 * <dev>/portio/portX to its struct uio_port. Freed by portio_release().
 */
struct uio_portio {
	struct kobject kobj;
	struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)
 131
 132static ssize_t portio_name_show(struct uio_port *port, char *buf)
 133{
 134        if (unlikely(!port->name))
 135                port->name = "";
 136
 137        return sprintf(buf, "%s\n", port->name);
 138}
 139
/* "start" attribute: first port number of the region */
static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->start);
}
 144
/* "size" attribute: number of ports in the region */
static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->size);
}
 149
 150static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
 151{
 152        const char *porttypes[] = {"none", "x86", "gpio", "other"};
 153
 154        if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
 155                return -EINVAL;
 156
 157        return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
 158}
 159
/*
 * One sysfs attribute of a portX directory together with its callbacks.
 * store is declared for symmetry but unused — all files are read-only.
 */
struct portio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_port *, char *);
	ssize_t (*store)(struct uio_port *, const char *, size_t);
};
 165
/* Read-only default attributes created under every portX kobject */
static struct portio_sysfs_entry portio_name_attribute =
	__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
	__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
	__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
	__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);

static struct attribute *portio_attrs[] = {
	&portio_name_attribute.attr,
	&portio_start_attribute.attr,
	&portio_size_attribute.attr,
	&portio_porttype_attribute.attr,
	NULL,	/* sysfs requires a NULL-terminated list */
};
 182
 183static void portio_release(struct kobject *kobj)
 184{
 185        struct uio_portio *portio = to_portio(kobj);
 186        kfree(portio);
 187}
 188
 189static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
 190                             char *buf)
 191{
 192        struct uio_portio *portio = to_portio(kobj);
 193        struct uio_port *port = portio->port;
 194        struct portio_sysfs_entry *entry;
 195
 196        entry = container_of(attr, struct portio_sysfs_entry, attr);
 197
 198        if (!entry->show)
 199                return -EIO;
 200
 201        return entry->show(port, buf);
 202}
 203
static const struct sysfs_ops portio_sysfs_ops = {
	.show = portio_type_show,
};

/* kobj_type for portX directories: read-only attrs, portio_release() frees */
static struct kobj_type portio_attr_type = {
	.release	= portio_release,
	.sysfs_ops	= &portio_sysfs_ops,
	.default_attrs	= portio_attrs,
};
 213
 214static ssize_t show_name(struct device *dev,
 215                         struct device_attribute *attr, char *buf)
 216{
 217        struct uio_device *idev = dev_get_drvdata(dev);
 218        int ret;
 219
 220        mutex_lock(&idev->info_lock);
 221        if (!idev->info) {
 222                ret = -EINVAL;
 223                dev_err(dev, "the device has been unregistered\n");
 224                goto out;
 225        }
 226
 227        ret = sprintf(buf, "%s\n", idev->info->name);
 228out:
 229        mutex_unlock(&idev->info_lock);
 230        return ret;
 231}
 232
 233static ssize_t show_version(struct device *dev,
 234                            struct device_attribute *attr, char *buf)
 235{
 236        struct uio_device *idev = dev_get_drvdata(dev);
 237        int ret;
 238
 239        mutex_lock(&idev->info_lock);
 240        if (!idev->info) {
 241                ret = -EINVAL;
 242                dev_err(dev, "the device has been unregistered\n");
 243                goto out;
 244        }
 245
 246        ret = sprintf(buf, "%s\n", idev->info->version);
 247
 248out:
 249        mutex_unlock(&idev->info_lock);
 250        return ret;
 251}
 252
 253static ssize_t show_event(struct device *dev,
 254                          struct device_attribute *attr, char *buf)
 255{
 256        struct uio_device *idev = dev_get_drvdata(dev);
 257        return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
 258}
 259
/* Attributes every /sys/class/uio/uioX device gets by default */
static struct device_attribute uio_class_attributes[] = {
	__ATTR(name, S_IRUGO, show_name, NULL),
	__ATTR(version, S_IRUGO, show_version, NULL),
	__ATTR(event, S_IRUGO, show_event, NULL),
	{}
};

/* UIO class infrastructure */
static struct class uio_class = {
	.name = "uio",
	.dev_attrs = uio_class_attributes,
};
 272
 273/*
 274 * device functions
 275 */
/*
 * Create the sysfs "maps"/"portio" directories and one mapX/portX
 * kobject per region with non-zero size. A zero-sized entry terminates
 * each array. On failure everything created so far is unwound; note
 * the deliberate fall-through from the portio unwind into the map
 * unwind, since all maps were added before any port region.
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi, pi;
	int map_found = 0;
	int portio_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;
	struct uio_port *port;
	struct uio_portio *portio;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			/* Create the "maps" directory lazily, only if at
			 * least one region exists */
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							&idev->dev.kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err_map;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err_map;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret)
			goto err_map_kobj;
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err_map_kobj;
	}

	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
		port = &idev->info->port[pi];
		if (port->size == 0)
			break;
		if (!portio_found) {
			portio_found = 1;
			idev->portio_dir = kobject_create_and_add("portio",
							&idev->dev.kobj);
			if (!idev->portio_dir) {
				ret = -ENOMEM;
				goto err_portio;
			}
		}
		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
		if (!portio) {
			ret = -ENOMEM;
			goto err_portio;
		}
		kobject_init(&portio->kobj, &portio_attr_type);
		portio->port = port;
		port->portio = portio;
		ret = kobject_add(&portio->kobj, idev->portio_dir,
						"port%d", pi);
		if (ret)
			goto err_portio_kobj;
		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
		if (ret)
			goto err_portio_kobj;
	}

	return 0;

err_portio:
	/* Entry pi itself was not added; start unwinding at pi - 1 */
	pi--;
err_portio_kobj:
	for (; pi >= 0; pi--) {
		port = &idev->info->port[pi];
		portio = port->portio;
		kobject_put(&portio->kobj);
	}
	kobject_put(idev->portio_dir);
	/* fall through: all map kobjects were created, drop them too */
err_map:
	mi--;
err_map_kobj:
	for (; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}
 369
/*
 * Tear down everything uio_dev_add_attributes() created. Stops at the
 * first zero-sized region, mirroring the creation loops; the final
 * kobject_put() on each directory releases the "maps"/"portio" dirs
 * (kobject_put(NULL) is a no-op if none were created).
 */
static void uio_dev_del_attributes(struct uio_device *idev)
{
	int i;
	struct uio_mem *mem;
	struct uio_port *port;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		mem = &idev->info->mem[i];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);

	for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
		port = &idev->info->port[i];
		if (port->size == 0)
			break;
		kobject_put(&port->portio->kobj);
	}
	kobject_put(idev->portio_dir);
}
 392
 393static int uio_get_minor(struct uio_device *idev)
 394{
 395        int retval = -ENOMEM;
 396
 397        mutex_lock(&minor_lock);
 398        retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
 399        if (retval >= 0) {
 400                idev->minor = retval;
 401                retval = 0;
 402        } else if (retval == -ENOSPC) {
 403                dev_err(&idev->dev, "too many uio devices\n");
 404                retval = -EINVAL;
 405        }
 406        mutex_unlock(&minor_lock);
 407        return retval;
 408}
 409
/* Return @idev's minor to the idr so it can be reused */
static void uio_free_minor(struct uio_device *idev)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, idev->minor);
	mutex_unlock(&minor_lock);
}
 416
/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 *
 * Bumps the event counter, wakes blocked readers/pollers and sends
 * SIGIO to fasync subscribers. Called from uio_interrupt() in hard
 * irq context, so only irq-safe primitives are used here.
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
 430
/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the devices uio_device structure
 *
 * Runs the driver-supplied handler; only an IRQ_HANDLED return is
 * forwarded to userspace as an event. info_lock is not (and cannot
 * be) taken here — this runs in hard irq context where sleeping on a
 * mutex is forbidden.
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret;

	ret = idev->info->handler(irq, idev->info);
	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}
 447
/*
 * Per-open-file state: the device plus the last event count this
 * reader has seen, so each opener observes each interrupt once.
 */
struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};
 452
 453static int uio_open(struct inode *inode, struct file *filep)
 454{
 455        struct uio_device *idev;
 456        struct uio_listener *listener;
 457        int ret = 0;
 458
 459        mutex_lock(&minor_lock);
 460        idev = idr_find(&uio_idr, iminor(inode));
 461        mutex_unlock(&minor_lock);
 462        if (!idev) {
 463                ret = -ENODEV;
 464                goto out;
 465        }
 466
 467        get_device(&idev->dev);
 468
 469        if (!try_module_get(idev->owner)) {
 470                ret = -ENODEV;
 471                goto err_module_get;
 472        }
 473
 474        listener = kmalloc(sizeof(*listener), GFP_KERNEL);
 475        if (!listener) {
 476                ret = -ENOMEM;
 477                goto err_alloc_listener;
 478        }
 479
 480        listener->dev = idev;
 481        listener->event_count = atomic_read(&idev->event);
 482        filep->private_data = listener;
 483
 484        mutex_lock(&idev->info_lock);
 485        if (!idev->info) {
 486                mutex_unlock(&idev->info_lock);
 487                ret = -EINVAL;
 488                goto err_alloc_listener;
 489        }
 490        if (idev->info && idev->info->open)
 491                ret = idev->info->open(idev->info, inode);
 492        mutex_unlock(&idev->info_lock);
 493        if (ret)
 494                goto err_infoopen;
 495
 496        return 0;
 497
 498err_infoopen:
 499        kfree(listener);
 500
 501err_alloc_listener:
 502        module_put(idev->owner);
 503
 504err_module_get:
 505        put_device(&idev->dev);
 506
 507out:
 508        return ret;
 509}
 510
 511static int uio_fasync(int fd, struct file *filep, int on)
 512{
 513        struct uio_listener *listener = filep->private_data;
 514        struct uio_device *idev = listener->dev;
 515
 516        return fasync_helper(fd, filep, on, &idev->async_queue);
 517}
 518
/*
 * release() on /dev/uioX: run the driver's release hook (if the device
 * is still registered), then drop the module and device references
 * taken in uio_open() and free the listener.
 */
static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	mutex_lock(&idev->info_lock);
	if (idev->info && idev->info->release)
		ret = idev->info->release(idev->info, inode);
	mutex_unlock(&idev->info_lock);

	module_put(idev->owner);
	kfree(listener);
	put_device(&idev->dev);
	return ret;
}
 535
 536static unsigned int uio_poll(struct file *filep, poll_table *wait)
 537{
 538        struct uio_listener *listener = filep->private_data;
 539        struct uio_device *idev = listener->dev;
 540        unsigned int ret = 0;
 541
 542        mutex_lock(&idev->info_lock);
 543        if (!idev->info || !idev->info->irq)
 544                ret = POLLERR;
 545        mutex_unlock(&idev->info_lock);
 546
 547        if (ret)
 548                return ret;
 549
 550        poll_wait(filep, &idev->wait, wait);
 551        if (listener->event_count != atomic_read(&idev->event))
 552                return POLLIN | POLLRDNORM;
 553        return 0;
 554}
 555
/*
 * read() on /dev/uioX blocks until the interrupt counter changes, then
 * returns the current 32-bit event count to userspace. Exactly one s32
 * per read; -EIO if the device has no irq or was unregistered.
 */
static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval = 0;
	s32 event_count;

	mutex_lock(&idev->info_lock);
	if (!idev->info || !idev->info->irq)
		retval = -EIO;
	mutex_unlock(&idev->info_lock);

	if (retval)
		return retval;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		/* State must be set before re-checking the condition so
		 * a wakeup between check and schedule() is not lost */
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			__set_current_state(TASK_RUNNING);
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				/* Remember what was reported so the next
				 * read blocks until a further interrupt */
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
 610
 611static ssize_t uio_write(struct file *filep, const char __user *buf,
 612                        size_t count, loff_t *ppos)
 613{
 614        struct uio_listener *listener = filep->private_data;
 615        struct uio_device *idev = listener->dev;
 616        ssize_t retval;
 617        s32 irq_on;
 618
 619        if (count != sizeof(s32))
 620                return -EINVAL;
 621
 622        if (copy_from_user(&irq_on, buf, count))
 623                return -EFAULT;
 624
 625        mutex_lock(&idev->info_lock);
 626        if (!idev->info) {
 627                retval = -EINVAL;
 628                goto out;
 629        }
 630
 631        if (!idev->info || !idev->info->irq) {
 632                retval = -EIO;
 633                goto out;
 634        }
 635
 636        if (!idev->info->irqcontrol) {
 637                retval = -ENOSYS;
 638                goto out;
 639        }
 640
 641        retval = idev->info->irqcontrol(idev->info, irq_on);
 642
 643out:
 644        mutex_unlock(&idev->info_lock);
 645        return retval ? retval : sizeof(s32);
 646}
 647
 648static int uio_find_mem_index(struct vm_area_struct *vma)
 649{
 650        struct uio_device *idev = vma->vm_private_data;
 651
 652        if (vma->vm_pgoff < MAX_UIO_MAPS) {
 653                if (idev->info->mem[vma->vm_pgoff].size == 0)
 654                        return -1;
 655                return (int)vma->vm_pgoff;
 656        }
 657        return -1;
 658}
 659
 660static void uio_vma_open(struct vm_area_struct *vma)
 661{
 662        struct uio_device *idev = vma->vm_private_data;
 663        idev->vma_count++;
 664}
 665
 666static void uio_vma_close(struct vm_area_struct *vma)
 667{
 668        struct uio_device *idev = vma->vm_private_data;
 669        idev->vma_count--;
 670}
 671
/*
 * Page fault handler for logical/virtual mappings: resolve the faulting
 * address to the backing page of the selected region. SIGBUS if the
 * device was unregistered or the region index is invalid.
 */
static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct uio_device *idev = vma->vm_private_data;
	struct page *page;
	unsigned long offset;
	void *addr;
	int ret = 0;
	int mi;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	mi = uio_find_mem_index(vma);
	if (mi < 0) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
	/* NOTE(review): vmalloc_to_page() can return NULL for a bad
	 * address, which get_page() would oops on — confirm the mmap
	 * size check makes out-of-range offsets impossible here */
	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;

out:
	mutex_unlock(&idev->info_lock);

	return ret;
}
 712
/* vm_ops for kernel-logical/vmalloc regions, faulted in page by page */
static const struct vm_operations_struct uio_logical_vm_ops = {
	.open = uio_vma_open,
	.close = uio_vma_close,
	.fault = uio_vma_fault,
};
 718
 719static int uio_mmap_logical(struct vm_area_struct *vma)
 720{
 721        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 722        vma->vm_ops = &uio_logical_vm_ops;
 723        uio_vma_open(vma);
 724        return 0;
 725}
 726
/* vm_ops for direct physical mappings; .access enables ptrace/gdb
 * peeking through the mapping where the arch supports it */
static const struct vm_operations_struct uio_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};
 732
 733static int uio_mmap_physical(struct vm_area_struct *vma)
 734{
 735        struct uio_device *idev = vma->vm_private_data;
 736        int mi = uio_find_mem_index(vma);
 737        struct uio_mem *mem;
 738
 739        if (mi < 0)
 740                return -EINVAL;
 741        mem = idev->info->mem + mi;
 742
 743        if (mem->addr & ~PAGE_MASK)
 744                return -ENODEV;
 745        if (vma->vm_end - vma->vm_start > mem->size)
 746                return -EINVAL;
 747
 748        vma->vm_ops = &uio_physical_vm_ops;
 749        if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
 750                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 751
 752        /*
 753         * We cannot use the vm_iomap_memory() helper here,
 754         * because vma->vm_pgoff is the map index we looked
 755         * up above in uio_find_mem_index(), rather than an
 756         * actual page offset into the mmap.
 757         *
 758         * So we just do the physical mmap without a page
 759         * offset.
 760         */
 761        return remap_pfn_range(vma,
 762                               vma->vm_start,
 763                               mem->addr >> PAGE_SHIFT,
 764                               vma->vm_end - vma->vm_start,
 765                               vma->vm_page_prot);
 766}
 767
/*
 * mmap() on /dev/uioX: userspace selects region N by mmap'ing at file
 * offset N*PAGE_SIZE. Validates size against the region, then defers
 * to a driver-supplied mmap hook or to the physical/logical helpers
 * depending on memtype.
 */
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		goto out;
	}

	if (vma->vm_end < vma->vm_start) {
		ret = -EINVAL;
		goto out;
	}

	vma->vm_private_data = idev;

	/* vm_pgoff is the region index, not a byte offset */
	mi = uio_find_mem_index(vma);
	if (mi < 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Region size rounded up to whole pages, including any sub-page
	 * start offset, bounds the mappable length */
	requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
			+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages) {
		ret = -EINVAL;
		goto out;
	}

	/* A driver-supplied mmap hook overrides the generic paths */
	if (idev->info->mmap) {
		ret = idev->info->mmap(idev->info, vma);
		goto out;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_IOVA:
	case UIO_MEM_PHYS:
		ret = uio_mmap_physical(vma);
		break;
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		ret = uio_mmap_logical(vma);
		break;
	default:
		ret = -EINVAL;
	}

 out:
	mutex_unlock(&idev->info_lock);
	return ret;
}
 825
/* File operations backing every /dev/uioX node */
static const struct file_operations uio_fops = {
	.owner		= THIS_MODULE,
	.open		= uio_open,
	.release	= uio_release,
	.read		= uio_read,
	.write		= uio_write,
	.mmap		= uio_mmap,
	.poll		= uio_poll,
	.fasync		= uio_fasync,
	.llseek		= noop_llseek,
};
 837
/*
 * Allocate a dynamic char-dev major with one minor per possible UIO
 * device and register a single cdev spanning all of them. Results are
 * stored in uio_major/uio_cdev for uio_major_cleanup().
 */
static int uio_major_init(void)
{
	static const char name[] = "uio";
	struct cdev *cdev = NULL;
	dev_t uio_dev = 0;
	int result;

	result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
	if (result)
		goto out;

	result = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto out_unregister;

	cdev->owner = THIS_MODULE;
	cdev->ops = &uio_fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
	if (result)
		goto out_put;

	uio_major = MAJOR(uio_dev);
	uio_cdev = cdev;
	return 0;
out_put:
	/* kobject_put() releases the cdev allocated by cdev_alloc() */
	kobject_put(&cdev->kobj);
out_unregister:
	unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
	return result;
}
 872
/* Undo uio_major_init(): release the dev_t range and drop the cdev */
static void uio_major_cleanup(void)
{
	unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
	cdev_del(uio_cdev);
}
 878
 879static int init_uio_class(void)
 880{
 881        int ret;
 882
 883        /* This is the first time in here, set everything up properly */
 884        ret = uio_major_init();
 885        if (ret)
 886                goto exit;
 887
 888        ret = class_register(&uio_class);
 889        if (ret) {
 890                printk(KERN_ERR "class_register failed for uio\n");
 891                goto err_class_register;
 892        }
 893        return 0;
 894
 895err_class_register:
 896        uio_major_cleanup();
 897exit:
 898        return ret;
 899}
 900
/* Undo init_uio_class(): class first, then the char-dev machinery */
static void release_uio_class(void)
{
	class_unregister(&uio_class);
	uio_major_cleanup();
}
 906
 907static void uio_device_release(struct device *dev)
 908{
 909        struct uio_device *idev = dev_get_drvdata(dev);
 910
 911        kfree(idev);
 912}
 913
 914/**
 915 * uio_register_device - register a new userspace IO device
 916 * @owner:      module that creates the new device
 917 * @parent:     parent device
 918 * @info:       UIO device capabilities
 919 *
 920 * returns zero on success or a negative error code.
 921 */
 922int __uio_register_device(struct module *owner,
 923                          struct device *parent,
 924                          struct uio_info *info)
 925{
 926        struct uio_device *idev;
 927        int ret = 0;
 928
 929        if (!parent || !info || !info->name || !info->version)
 930                return -EINVAL;
 931
 932        info->uio_dev = NULL;
 933
 934        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
 935        if (!idev) {
 936                return -ENOMEM;
 937        }
 938
 939        idev->owner = owner;
 940        idev->info = info;
 941        mutex_init(&idev->info_lock);
 942        init_waitqueue_head(&idev->wait);
 943        atomic_set(&idev->event, 0);
 944
 945        ret = uio_get_minor(idev);
 946        if (ret)
 947                return ret;
 948
 949        idev->dev.devt = MKDEV(uio_major, idev->minor);
 950        idev->dev.class = &uio_class;
 951        idev->dev.parent = parent;
 952        idev->dev.release = uio_device_release;
 953        dev_set_drvdata(&idev->dev, idev);
 954
 955        ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
 956        if (ret)
 957                goto err_device_create;
 958
 959        ret = device_register(&idev->dev);
 960        if (ret)
 961                goto err_device_create;
 962
 963        ret = uio_dev_add_attributes(idev);
 964        if (ret)
 965                goto err_uio_dev_add_attributes;
 966
 967        info->uio_dev = idev;
 968
 969        if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
 970                /*
 971                 * Note that we deliberately don't use devm_request_irq
 972                 * here. The parent module can unregister the UIO device
 973                 * and call pci_disable_msi, which requires that this
 974                 * irq has been freed. However, the device may have open
 975                 * FDs at the time of unregister and therefore may not be
 976                 * freed until they are released.
 977                 */
 978                ret = request_irq(info->irq, uio_interrupt,
 979                                  info->irq_flags, info->name, idev);
 980                if (ret)
 981                        goto err_request_irq;
 982        }
 983
 984        return 0;
 985
 986err_request_irq:
 987        uio_dev_del_attributes(idev);
 988err_uio_dev_add_attributes:
 989        device_unregister(&idev->dev);
 990err_device_create:
 991        uio_free_minor(idev);
 992        return ret;
 993}
 994EXPORT_SYMBOL_GPL(__uio_register_device);
 995
/**
 * uio_unregister_device - unregister a UIO device
 * @info:	UIO device capabilities
 *
 * Tears down sysfs attributes, releases the irq, detaches the driver
 * info (so fops on still-open fds fail gracefully) and unregisters the
 * device. The uio_device itself is freed by uio_device_release() once
 * the last reference is dropped.
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	/* NOTE(review): the minor is released before device_unregister(),
	 * so a new device can reuse it while open fds still reference
	 * this idev — confirm ordering against later upstream fixes */
	uio_free_minor(idev);

	mutex_lock(&idev->info_lock);
	uio_dev_del_attributes(idev);

	if (info->irq && info->irq != UIO_IRQ_CUSTOM)
		free_irq(info->irq, idev);

	/* Clearing info makes read/write/poll/mmap on surviving fds
	 * return -EINVAL/-EIO instead of touching freed driver state */
	idev->info = NULL;
	mutex_unlock(&idev->info_lock);

	device_unregister(&idev->dev);

	return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
1026
/* Module entry point: all real setup lives in init_uio_class() */
static int __init uio_init(void)
{
	return init_uio_class();
}
1031
/* Module exit: tear down class/chardev, then discard the (empty) idr */
static void __exit uio_exit(void)
{
	release_uio_class();
	idr_destroy(&uio_idr);
}
1037
1038module_init(uio_init)
1039module_exit(uio_exit)
1040MODULE_LICENSE("GPL v2");
1041