linux/drivers/rapidio/devices/rio_mport_cdev.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME        "rio_mport"
#define DRV_PREFIX      DRV_NAME ": "
#define DEV_NAME        "rio_mport"
#define DRV_VERSION     "1.0.0"

/* Debug output filtering masks */
enum {
        DBG_NONE        = 0,
        DBG_INIT        = BIT(0), /* driver init */
        DBG_EXIT        = BIT(1), /* driver exit */
        DBG_MPORT       = BIT(2), /* mport add/remove */
        DBG_RDEV        = BIT(3), /* RapidIO device add/remove */
        DBG_DMA         = BIT(4), /* DMA transfer messages */
        DBG_MMAP        = BIT(5), /* mapping messages */
        DBG_IBW         = BIT(6), /* inbound window */
        DBG_EVENT       = BIT(7), /* event handling messages */
        DBG_OBW         = BIT(8), /* outbound window messages */
        DBG_DBELL       = BIT(9), /* doorbell messages */
        DBG_ALL         = ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)          \
        do {                                    \
                if (DBG_##level & dbg_level)    \
                        pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
        } while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
                no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
        pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
        pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
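
/*
 * Illustrative note (an editorial addition, not part of the original
 * sources): on a DEBUG build the masks above can be OR-ed together when
 * loading the module, assuming the module is loaded as rio_mport_cdev:
 *
 *   modprobe rio_mport_cdev dbg_level=0x30   # DBG_DMA | DBG_MMAP
 *
 * Since dbg_level is writable, it can also be changed at runtime via
 * /sys/module/rio_mport_cdev/parameters/dbg_level.
 */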

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
        void            *ib_base;
        dma_addr_t      ib_phys;
        u32             ib_size;
        u64             ib_rio_base;
        bool            ib_map;
        struct file     *filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
        MAP_INBOUND,
        MAP_OUTBOUND,
        MAP_DMA,
};

struct rio_mport_mapping {
        struct list_head node;
        struct mport_dev *md;
        enum rio_mport_map_dir dir;
        u16 rioid;
        u64 rio_addr;
        dma_addr_t phys_addr; /* for mmap */
        void *virt_addr; /* kernel address, for dma_free_coherent */
        u64 size;
        struct kref ref; /* refcount of vmas sharing the mapping */
        struct file *filp;
};

struct rio_mport_dma_map {
        int valid;
        u64 length;
        void *vaddr;
        dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS      16
#define MPORT_EVENT_DEPTH       10

/*
 * mport_dev - driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @doorbells  list of inbound doorbell filters
 * @db_lock    lock for doorbells list
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   default DMA channel associated with this device
 * @dma_ref    reference counter for the default DMA channel
 * @comp       completion used to synchronize DMA channel release
 */
struct mport_dev {
        atomic_t                active;
        struct list_head        node;
        struct cdev             cdev;
        struct device           dev;
        struct rio_mport        *mport;
        struct mutex            buf_mutex;
        struct mutex            file_mutex;
        struct list_head        file_list;
        struct rio_mport_properties     properties;
        struct list_head                doorbells;
        spinlock_t                      db_lock;
        struct list_head                portwrites;
        spinlock_t                      pw_lock;
        struct list_head        mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan *dma_chan;
        struct kref     dma_ref;
        struct completion comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 * @async_list    list of pending asynchronous DMA requests
 * @req_lock      lock for async_list
 * @dma_lock      lock serializing DMA channel allocation
 * @dma_ref       reference counter for the allocated DMA channel
 * @comp          completion signaled when the DMA channel is released
 */
struct mport_cdev_priv {
        struct mport_dev        *md;
        struct fasync_struct    *async_queue;
        struct list_head        list;
        struct list_head        db_filters;
        struct list_head        pw_filters;
        struct kfifo            event_fifo;
        wait_queue_head_t       event_rx_wait;
        spinlock_t              fifo_lock;
        u32                     event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
        spinlock_t              req_lock;
        struct mutex            dma_lock;
        struct kref             dma_ref;
        struct completion       comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
        struct list_head md_node;
        struct list_head priv_node;
        struct mport_cdev_priv *priv;
        struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node reference to device node
 * @priv_node node in private data
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
        struct list_head data_node;
        struct list_head priv_node;
        struct mport_cdev_priv *priv;
        struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented-out portion of poll function: FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                              int local)
{
        struct rio_mport *mport = priv->md->mport;
        struct rio_mport_maint_io maint_io;
        u32 *buffer;
        u32 offset;
        size_t length;
        int ret, i;

        if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
                return -EFAULT;

        if ((maint_io.offset % 4) ||
            (maint_io.length == 0) || (maint_io.length % 4) ||
            (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;

        buffer = vmalloc(maint_io.length);
        if (buffer == NULL)
                return -ENOMEM;
        length = maint_io.length/sizeof(u32);
        offset = maint_io.offset;

        for (i = 0; i < length; i++) {
                if (local)
                        ret = __rio_local_read_config_32(mport,
                                offset, &buffer[i]);
                else
                        ret = rio_mport_read_config_32(mport, maint_io.rioid,
                                maint_io.hopcount, offset, &buffer[i]);
                if (ret)
                        goto out;

                offset += 4;
        }

        if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
                                   buffer, maint_io.length)))
                ret = -EFAULT;
out:
        vfree(buffer);
        return ret;
}

static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                              int local)
{
        struct rio_mport *mport = priv->md->mport;
        struct rio_mport_maint_io maint_io;
        u32 *buffer;
        u32 offset;
        size_t length;
        int ret = -EINVAL, i;

        if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
                return -EFAULT;

        if ((maint_io.offset % 4) ||
            (maint_io.length == 0) || (maint_io.length % 4) ||
            (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;

        buffer = vmalloc(maint_io.length);
        if (buffer == NULL)
                return -ENOMEM;
        length = maint_io.length;

        if (unlikely(copy_from_user(buffer,
                        (void __user *)(uintptr_t)maint_io.buffer, length))) {
                ret = -EFAULT;
                goto out;
        }

        offset = maint_io.offset;
        length /= sizeof(u32);

        for (i = 0; i < length; i++) {
                if (local)
                        ret = __rio_local_write_config_32(mport,
                                                          offset, buffer[i]);
                else
                        ret = rio_mport_write_config_32(mport, maint_io.rioid,
                                                        maint_io.hopcount,
                                                        offset, buffer[i]);
                if (ret)
                        goto out;

                offset += 4;
        }

out:
        vfree(buffer);
        return ret;
}
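
/*
 * Illustrative user-space sketch of a local maintenance read through this
 * driver (an editorial addition: the RIO_MPORT_MAINT_READ_LOCAL ioctl name
 * and the device node path are assumptions based on the UAPI header
 * <linux/rio_mport_cdev.h> and typical system configuration; the
 * rio_mport_maint_io fields match those used above):
 *
 *   uint32_t value;
 *   struct rio_mport_maint_io io = {
 *           .offset = 0,                 // must be 4-byte aligned
 *           .length = 4,                 // non-zero multiple of 4
 *           .buffer = (uintptr_t)&value, // user buffer for the result
 *   };
 *   int fd = open("/dev/rio_mport0", O_RDWR);
 *   if (fd >= 0 && ioctl(fd, RIO_MPORT_MAINT_READ_LOCAL, &io) < 0)
 *           perror("maint read");
 */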


/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
                                  u16 rioid, u64 raddr, u32 size,
                                  dma_addr_t *paddr)
{
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;

        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
        if (ret < 0)
                goto err_map_outb;

        map->dir = MAP_OUTBOUND;
        map->rioid = rioid;
        map->rio_addr = raddr;
        map->size = size;
        map->phys_addr = *paddr;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        list_add_tail(&map->node, &md->mappings);
        return 0;
err_map_outb:
        kfree(map);
        return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
                               u16 rioid, u64 raddr, u32 size,
                               dma_addr_t *paddr)
{
        struct rio_mport_mapping *map;
        int err = -ENOMEM;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry(map, &md->mappings, node) {
                if (map->dir != MAP_OUTBOUND)
                        continue;
                if (rioid == map->rioid &&
                    raddr == map->rio_addr && size == map->size) {
                        *paddr = map->phys_addr;
                        err = 0;
                        break;
                } else if (rioid == map->rioid &&
                           raddr < (map->rio_addr + map->size - 1) &&
                           (raddr + size) > map->rio_addr) {
                        err = -EBUSY;
                        break;
                }
        }

        /* If not found, create new */
        if (err == -ENOMEM)
                err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
                                                size, paddr);
        mutex_unlock(&md->buf_mutex);
        return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *data = priv->md;
        struct rio_mmap map;
        dma_addr_t paddr;
        int ret;

        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
                   map.rioid, map.rio_addr, map.length);

        ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
                                             map.rio_addr, map.length, &paddr);
        if (ret < 0) {
                rmcd_error("Failed to set OBW err= %d", ret);
                return ret;
        }

        map.handle = paddr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map))))
                return -EFAULT;
        return 0;
}

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by the mapping routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        struct rio_mport_mapping *map, *_map;

        if (!md->mport->ops->unmap_outb)
                return -EPROTONOSUPPORT;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        rmcd_debug(OBW, "h=0x%llx", handle);

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
                        if (map->filp == filp) {
                                rmcd_debug(OBW, "kref_put h=0x%llx", handle);
                                map->filp = NULL;
                                kref_put(&map->ref, mport_release_mapping);
                        }
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        return 0;
}
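
/*
 * Illustrative user-space sketch of outbound window setup and teardown (an
 * editorial addition: RIO_MAP_OUTBOUND/RIO_UNMAP_OUTBOUND are assumed to be
 * the UAPI ioctl names from <linux/rio_mport_cdev.h>; the struct rio_mmap
 * fields match those used by rio_mport_obw_map() above):
 *
 *   struct rio_mmap map = {
 *           .rioid = destid,
 *           .rio_addr = 0x10000000,
 *           .length = 0x10000,
 *   };
 *   if (ioctl(fd, RIO_MAP_OUTBOUND, &map) == 0) {
 *           // map.handle now holds the physical address of the window,
 *           // suitable for a subsequent mmap() of this device node
 *           ...
 *           ioctl(fd, RIO_UNMAP_OUTBOUND, &map.handle);
 *   }
 */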

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg:  Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u16 hdid;

        if (copy_from_user(&hdid, arg, sizeof(hdid)))
                return -EFAULT;

        md->mport->host_deviceid = hdid;
        md->properties.hdid = hdid;
        rio_local_set_device_id(md->mport, hdid);

        rmcd_debug(MPORT, "Set host device Id to %d", hdid);

        return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg:  Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u32 comptag;

        if (copy_from_user(&comptag, arg, sizeof(comptag)))
                return -EFAULT;

        rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

        rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

        return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
        struct kref refcount;
        struct list_head node;
        struct file *filp;
        struct mport_cdev_priv *priv;
        enum rio_transfer_sync sync;
        struct sg_table sgt;
        struct page **page_list;
        unsigned int nr_pages;
        struct rio_mport_mapping *map;
        struct dma_chan *dmach;
        enum dma_data_direction dir;
        dma_cookie_t cookie;
        enum dma_status status;
        struct completion req_comp;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
        struct mport_dev *md =
                        container_of(dma_ref, struct mport_dev, dma_ref);

        rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
        rio_release_dma(md->dma_chan);
        md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
        struct mport_cdev_priv *priv =
                        container_of(dma_ref, struct mport_cdev_priv, dma_ref);

        rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
        complete(&priv->comp);
}

static void dma_req_free(struct kref *ref)
{
        struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
                        refcount);
        struct mport_cdev_priv *priv = req->priv;

        dma_unmap_sg(req->dmach->device->dev,
                     req->sgt.sgl, req->sgt.nents, req->dir);
        sg_free_table(&req->sgt);
        if (req->page_list) {
                unpin_user_pages(req->page_list, req->nr_pages);
                kfree(req->page_list);
        }

        if (req->map) {
                mutex_lock(&req->map->md->buf_mutex);
                kref_put(&req->map->ref, mport_release_mapping);
                mutex_unlock(&req->map->md->buf_mutex);
        }

        kref_put(&priv->dma_ref, mport_release_dma);

        kfree(req);
}

static void dma_xfer_callback(void *param)
{
        struct mport_dma_req *req = (struct mport_dma_req *)param;
        struct mport_cdev_priv *priv = req->priv;

        req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
                                               NULL, NULL);
        complete(&req->req_comp);
        kref_put(&req->refcount, dma_req_free);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns pointer to DMA transaction descriptor allocated by DMA driver on
 * success, or NULL/ERR_PTR() on failure. Callers must therefore check the
 * returned pointer both for NULL and with the IS_ERR() macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
        struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
        enum dma_ctrl_flags flags)
{
        struct rio_dma_data tx_data;

        tx_data.sg = sgt->sgl;
        tx_data.sg_len = nents;
        tx_data.rio_addr_u = 0;
        tx_data.rio_addr = transfer->rio_addr;
        if (dir == DMA_MEM_TO_DEV) {
                switch (transfer->method) {
                case RIO_EXCHANGE_NWRITE:
                        tx_data.wr_type = RDW_ALL_NWRITE;
                        break;
                case RIO_EXCHANGE_NWRITE_R_ALL:
                        tx_data.wr_type = RDW_ALL_NWRITE_R;
                        break;
                case RIO_EXCHANGE_NWRITE_R:
                        tx_data.wr_type = RDW_LAST_NWRITE_R;
                        break;
                case RIO_EXCHANGE_DEFAULT:
                        tx_data.wr_type = RDW_DEFAULT;
                        break;
                default:
                        return ERR_PTR(-EINVAL);
                }
        }

        return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opened given
 * mport. If a dedicated DMA channel is not available, use the default
 * channel, which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
        mutex_lock(&priv->dma_lock);
        if (!priv->dmach) {
                priv->dmach = rio_request_mport_dma(priv->md->mport);
                if (!priv->dmach) {
                        /* Use default DMA channel if available */
                        if (priv->md->dma_chan) {
                                priv->dmach = priv->md->dma_chan;
                                kref_get(&priv->md->dma_ref);
                        } else {
                                rmcd_error("Failed to get DMA channel");
                                mutex_unlock(&priv->dma_lock);
                                return -ENODEV;
                        }
                } else if (!priv->md->dma_chan) {
                        /* Register default DMA channel if we do not have one */
                        priv->md->dma_chan = priv->dmach;
                        kref_init(&priv->md->dma_ref);
                        rmcd_debug(DMA, "Register DMA_chan %d as default",
                                   priv->dmach->chan_id);
                }

                kref_init(&priv->dma_ref);
                init_completion(&priv->comp);
        }

        kref_get(&priv->dma_ref);
        mutex_unlock(&priv->dma_lock);
        return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
        kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
                          struct rio_transfer_io *xfer,
                          enum rio_transfer_sync sync, int nents)
{
        struct mport_cdev_priv *priv;
        struct sg_table *sgt;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        unsigned long tmo = msecs_to_jiffies(dma_timeout);
        enum dma_transfer_direction dir;
        long wret;
        int ret = 0;

        priv = req->priv;
        sgt = &req->sgt;

        chan = priv->dmach;
        dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

        rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
                   current->comm, task_pid_nr(current),
                   dev_name(&chan->dev->device),
                   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

        /* Initialize DMA transaction request */
        tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

        if (!tx) {
                rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                        xfer->rio_addr, xfer->length);
                ret = -EIO;
                goto err_out;
        } else if (IS_ERR(tx)) {
                ret = PTR_ERR(tx);
                rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                        xfer->rio_addr, xfer->length);
                goto err_out;
        }

        tx->callback = dma_xfer_callback;
        tx->callback_param = req;

        req->status = DMA_IN_PROGRESS;
        kref_get(&req->refcount);

        cookie = dmaengine_submit(tx);
        req->cookie = cookie;

        rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
                   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

        if (dma_submit_error(cookie)) {
                rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
                           cookie, xfer->rio_addr, xfer->length);
                kref_put(&req->refcount, dma_req_free);
                ret = -EIO;
                goto err_out;
        }

        dma_async_issue_pending(chan);

        if (sync == RIO_TRANSFER_ASYNC) {
                spin_lock(&priv->req_lock);
                list_add_tail(&req->node, &priv->async_list);
                spin_unlock(&priv->req_lock);
                return cookie;
        } else if (sync == RIO_TRANSFER_FAF)
                return 0;

        wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

        if (wret == 0) {
                /* Timeout on wait occurred */
                rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
                       current->comm, task_pid_nr(current),
                       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
                return -ETIMEDOUT;
        } else if (wret == -ERESTARTSYS) {
                /* wait_for_completion was interrupted by a signal, but the
                 * DMA transfer may still be in progress
                 */
                rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
                        current->comm, task_pid_nr(current),
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
                return -EINTR;
        }

        if (req->status != DMA_COMPLETE) {
                /* DMA transaction completion was signaled with error */
                rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
                        current->comm, task_pid_nr(current),
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                        cookie, req->status, ret);
                ret = -EIO;
        }

err_out:
        return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *                               DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
                 enum rio_transfer_sync sync, enum dma_data_direction dir,
                 struct rio_transfer_io *xfer)
{
        struct mport_cdev_priv *priv = filp->private_data;
        unsigned long nr_pages = 0;
        struct page **page_list = NULL;
        struct mport_dma_req *req;
        struct mport_dev *md = priv->md;
        struct dma_chan *chan;
        int ret;
        int nents;

        if (xfer->length == 0)
                return -EINVAL;
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = get_dma_channel(priv);
        if (ret) {
                kfree(req);
                return ret;
        }
        chan = priv->dmach;

        kref_init(&req->refcount);
        init_completion(&req->req_comp);
        req->dir = dir;
        req->filp = filp;
        req->priv = priv;
        req->dmach = chan;
        req->sync = sync;

        /*
         * If parameter loc_addr != NULL, we are transferring data from/to
         * data buffer allocated in user-space: lock in memory user-space
         * buffer pages and build an SG table for DMA transfer request
         *
         * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
         * used for DMA data transfers: build single entry SG table using
         * offset within the internal buffer specified by handle parameter.
         */
        if (xfer->loc_addr) {
                unsigned int offset;
                long pinned;

                offset = lower_32_bits(offset_in_page(xfer->loc_addr));
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

                page_list = kmalloc_array(nr_pages,
                                          sizeof(*page_list), GFP_KERNEL);
                if (page_list == NULL) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                pinned = pin_user_pages_fast(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
                                nr_pages,
                                dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
                                page_list);

                if (pinned != nr_pages) {
                        if (pinned < 0) {
                                rmcd_error("pin_user_pages_fast err=%ld",
                                           pinned);
                                nr_pages = 0;
                        } else {
                                rmcd_error("pinned %ld out of %ld pages",
                                           pinned, nr_pages);
                                /*
                                 * Set nr_pages to the number of pages that
                                 * were actually pinned, so that the error
                                 * handler unpins only those:
                                 */
                                nr_pages = pinned;
                        }
                        ret = -EFAULT;
                        goto err_pg;
                }

                ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
                                        offset, xfer->length, GFP_KERNEL);
                if (ret) {
                        rmcd_error("sg_alloc_table failed with err=%d", ret);
                        goto err_pg;
                }

                req->page_list = page_list;
                req->nr_pages = nr_pages;
        } else {
                dma_addr_t baddr;
                struct rio_mport_mapping *map;

                baddr = (dma_addr_t)xfer->handle;

                mutex_lock(&md->buf_mutex);
                list_for_each_entry(map, &md->mappings, node) {
                        if (baddr >= map->phys_addr &&
                            baddr < (map->phys_addr + map->size)) {
                                kref_get(&map->ref);
                                req->map = map;
                                break;
                        }
                }
                mutex_unlock(&md->buf_mutex);

                if (req->map == NULL) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                if (xfer->length + xfer->offset > map->size) {
                        ret = -EINVAL;
                        goto err_req;
                }

                ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
                if (unlikely(ret)) {
                        rmcd_error("sg_alloc_table failed for internal buf");
                        goto err_req;
                }

                sg_set_buf(req->sgt.sgl,
                           map->virt_addr + (baddr - map->phys_addr) +
                                xfer->offset, xfer->length);
        }

        nents = dma_map_sg(chan->device->dev,
                           req->sgt.sgl, req->sgt.nents, dir);
        if (nents == 0) {
                rmcd_error("Failed to map SG list");
                ret = -EFAULT;
                goto err_pg;
        }

        ret = do_dma_request(req, xfer, sync, nents);

        if (ret >= 0) {
                if (sync == RIO_TRANSFER_ASYNC)
                        return ret; /* return ASYNC cookie */
        } else {
                rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
        }

err_pg:
        if (!req->page_list) {
                unpin_user_pages(page_list, nr_pages);
                kfree(page_list);
        }
err_req:
        kref_put(&req->refcount, dma_req_free);
        return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct rio_transaction transaction;
        struct rio_transfer_io *transfer;
        enum dma_data_direction dir;
        int i, ret = 0;
        size_t size;

        if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
                return -EFAULT;

        if (transaction.count != 1) /* only single transfer for now */
                return -EINVAL;

        if ((transaction.transfer_mode &
             priv->md->properties.transfer_mode) == 0)
                return -ENODEV;

        size = array_size(sizeof(*transfer), transaction.count);
        transfer = vmalloc(size);
        if (!transfer)
                return -ENOMEM;

        if (unlikely(copy_from_user(transfer,
                                    (void __user *)(uintptr_t)transaction.block,
                                    size))) {
                ret = -EFAULT;
                goto out_free;
        }

        dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
                                        DMA_FROM_DEVICE : DMA_TO_DEVICE;
        for (i = 0; i < transaction.count && ret == 0; i++)
                ret = rio_dma_transfer(filp, transaction.transfer_mode,
                        transaction.sync, dir, &transfer[i]);

        if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
                                  transfer, size)))
                ret = -EFAULT;

out_free:
        vfree(transfer);

        return ret;
}
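
/*
 * Illustrative user-space sketch of a synchronous DMA write using the
 * transfer ioctl handled above (an editorial addition: RIO_TRANSFER and the
 * RIO_TRANSFER_* constants are assumptions based on the UAPI header
 * <linux/rio_mport_cdev.h>):
 *
 *   struct rio_transfer_io xfer = {
 *           .rioid = destid,
 *           .rio_addr = remote_addr,
 *           .loc_addr = (uintptr_t)buf,  // user buffer; set to 0 to use a
 *           .length = len,               //  kernel buffer via .handle/.offset
 *   };
 *   struct rio_transaction trans = {
 *           .block = (uintptr_t)&xfer,
 *           .count = 1,                  // only single transfers for now
 *           .transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *           .sync = RIO_TRANSFER_SYNC,
 *           .dir = RIO_TRANSFER_DIR_WRITE,
 *   };
 *   if (ioctl(fd, RIO_TRANSFER, &trans) < 0)
 *           perror("rio transfer");
 */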

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv;
        struct rio_async_tx_wait w_param;
        struct mport_dma_req *req;
        dma_cookie_t cookie;
        unsigned long tmo;
        long wret;
        int found = 0;
        int ret;

        priv = (struct mport_cdev_priv *)filp->private_data;

        if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
                return -EFAULT;

        cookie = w_param.token;
        if (w_param.timeout)
                tmo = msecs_to_jiffies(w_param.timeout);
        else /* Use default DMA timeout */
                tmo = msecs_to_jiffies(dma_timeout);

        spin_lock(&priv->req_lock);
        list_for_each_entry(req, &priv->async_list, node) {
                if (req->cookie == cookie) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&priv->req_lock);

        if (!found)
                return -EAGAIN;

        wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

        if (wret == 0) {
                /* Timeout on wait occurred */
                rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
                       current->comm, task_pid_nr(current),
                       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
                ret = -ETIMEDOUT;
                goto err_tmo;
        } else if (wret == -ERESTARTSYS) {
                /* wait_for_completion was interrupted by a signal, but the
                 * DMA transfer may still be in progress
                 */
                rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
                        current->comm, task_pid_nr(current),
                        (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
                ret = -EINTR;
                goto err_tmo;
        }

        if (req->status != DMA_COMPLETE) {
                /* DMA transaction completion signaled with transfer error */
                rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
                        current->comm, task_pid_nr(current),
                        (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
                        req->status);
                ret = -EIO;
        } else
                ret = 0;

        if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
                kref_put(&req->refcount, dma_req_free);

        return ret;

err_tmo:
        /* Return request back into async queue */
        spin_lock(&priv->req_lock);
        list_add_tail(&req->node, &priv->async_list);
        spin_unlock(&priv->req_lock);
        return ret;
}

static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
                        u64 size, struct rio_mport_mapping **mapping)
{
        struct rio_mport_mapping *map;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
                                            &map->phys_addr, GFP_KERNEL);
        if (map->virt_addr == NULL) {
                kfree(map);
                return -ENOMEM;
        }

        map->dir = MAP_DMA;
        map->size = size;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        mutex_lock(&md->buf_mutex);
        list_add_tail(&map->node, &md->mappings);
        mutex_unlock(&md->buf_mutex);
        *mapping = map;

        return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        struct rio_dma_mem map;
        struct rio_mport_mapping *mapping = NULL;
        int ret;

        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
        if (ret)
                return ret;

        map.dma_handle = mapping->phys_addr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                mutex_lock(&md->buf_mutex);
                kref_put(&mapping->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
                return -EFAULT;
        }

        return 0;
}
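
/*
 * Illustrative user-space sketch of allocating and mapping a DMA buffer (an
 * editorial addition: RIO_ALLOC_DMA/RIO_FREE_DMA are assumed UAPI ioctl
 * names from <linux/rio_mport_cdev.h>; struct rio_dma_mem fields match
 * those used by rio_mport_alloc_dma() above, and passing dma_handle as the
 * mmap() offset is an assumption about this driver's mmap method):
 *
 *   struct rio_dma_mem dbuf = { .length = 0x10000 };
 *   if (ioctl(fd, RIO_ALLOC_DMA, &dbuf) == 0) {
 *           void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *                          MAP_SHARED, fd, dbuf.dma_handle);
 *           ...
 *           ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
 *   }
 */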

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        int ret = -EFAULT;
        struct rio_mport_mapping *map, *_map;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
        rmcd_debug(EXIT, "filp=%p", filp);

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_DMA && map->phys_addr == handle &&
                    map->filp == filp) {
                        kref_put(&map->ref, mport_release_mapping);
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        if (ret == -EFAULT) {
                rmcd_debug(DMA, "ERR no matching mapping");
                return ret;
        }

        return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
                                u64 raddr, u64 size,
                                struct rio_mport_mapping **mapping)
{
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;

        /* rio_map_inb_region() accepts u32 size */
        if (size > 0xffffffff)
                return -EINVAL;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
                                            &map->phys_addr, GFP_KERNEL);
        if (map->virt_addr == NULL) {
                ret = -ENOMEM;
                goto err_dma_alloc;
        }

        if (raddr == RIO_MAP_ANY_ADDR)
                raddr = map->phys_addr;
        ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
        if (ret < 0)
                goto err_map_inb;

        map->dir = MAP_INBOUND;
        map->rio_addr = raddr;
        map->size = size;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        mutex_lock(&md->buf_mutex);
        list_add_tail(&map->node, &md->mappings);
        mutex_unlock(&md->buf_mutex);
        *mapping = map;
        return 0;

err_map_inb:
        dma_free_coherent(mport->dev.parent, size,
                          map->virt_addr, map->phys_addr);
err_dma_alloc:
        kfree(map);
        return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
                              u64 raddr, u64 size,
                              struct rio_mport_mapping **mapping)
{
        struct rio_mport_mapping *map;
        int err = -ENOMEM;

        if (raddr == RIO_MAP_ANY_ADDR)
                goto get_new;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry(map, &md->mappings, node) {
                if (map->dir != MAP_INBOUND)
                        continue;
                if (raddr == map->rio_addr && size == map->size) {
                        /* allow exact match only */
                        *mapping = map;
                        err = 0;
                        break;
                } else if (raddr < (map->rio_addr + map->size - 1) &&
                           (raddr + size) > map->rio_addr) {
                        err = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        if (err != -ENOMEM)
                return err;
get_new:
        /* not found, create new */
        return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        struct rio_mmap map;
        struct rio_mport_mapping *mapping = NULL;
        int ret;

        if (!md->mport->ops->map_inb)
                return -EPROTONOSUPPORT;
        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

        ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
                                            map.length, &mapping);
        if (ret)
                return ret;

        map.handle = mapping->phys_addr;
        map.rio_addr = mapping->rio_addr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                /* Delete mapping if it was created by this request */
                if (ret == 0 && mapping->filp == filp) {
                        mutex_lock(&md->buf_mutex);
                        kref_put(&mapping->ref, mport_release_mapping);
                        mutex_unlock(&md->buf_mutex);
                }
                return -EFAULT;
        }

        return 0;
}

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                    previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        struct rio_mport_mapping *map, *_map;

        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

        if (!md->mport->ops->unmap_inb)
                return -EPROTONOSUPPORT;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
                        if (map->filp == filp) {
                                map->filp = NULL;
                                kref_put(&map->ref, mport_release_mapping);
                        }
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg:  port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u32 port_idx = md->mport->index;

        rmcd_debug(MPORT, "port_index=%d", port_idx);

        if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
                return -EFAULT;

        return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
                               struct rio_event *event)
{
        int overflow;

        if (!(priv->event_mask & event->header))
                return -EACCES;

        spin_lock(&priv->fifo_lock);
        overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
                || kfifo_in(&priv->event_fifo, (unsigned char *)event,
                        sizeof(*event)) != sizeof(*event);
        spin_unlock(&priv->fifo_lock);

        wake_up_interruptible(&priv->event_rx_wait);

        if (overflow) {
                dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
                return -EBUSY;
        }

        return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
                                       u16 src, u16 dst, u16 info)
{
        struct mport_dev *data = dev_id;
        struct mport_cdev_priv *priv;
        struct rio_mport_db_filter *db_filter;
        struct rio_event event;
        int handled;

        event.header = RIO_DOORBELL;
        event.u.doorbell.rioid = src;
        event.u.doorbell.payload = info;

        handled = 0;
        spin_lock(&data->db_lock);
        list_for_each_entry(db_filter, &data->doorbells, data_node) {
                if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
                      db_filter->filter.rioid == src)) &&
                      info >= db_filter->filter.low &&
                      info <= db_filter->filter.high) {
                        priv = db_filter->priv;
                        rio_mport_add_event(priv, &event);
                        handled = 1;
                }
        }
        spin_unlock(&data->db_lock);

        if (!handled)
                dev_warn(&data->dev,
                        "%s: spurious DB received from 0x%x, info=0x%04x\n",
                        __func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
                                   void __user *arg)
{
        struct mport_dev *md = priv->md;
        struct rio_mport_db_filter *db_filter;
        struct rio_doorbell_filter filter;
        unsigned long flags;
        int ret;

        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;

        if (filter.low > filter.high)
                return -EINVAL;

        ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
                                    rio_mport_doorbell_handler);
        if (ret) {
                rmcd_error("%s failed to register IBDB, err=%d",
                           dev_name(&md->dev), ret);
                return ret;
        }

        db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
        if (db_filter == NULL) {
                rio_release_inb_dbell(md->mport, filter.low, filter.high);
                return -ENOMEM;
        }

        db_filter->filter = filter;
        db_filter->priv = priv;
        spin_lock_irqsave(&md->db_lock, flags);
        list_add_tail(&db_filter->priv_node, &priv->db_filters);
        list_add_tail(&db_filter->data_node, &md->doorbells);
        spin_unlock_irqrestore(&md->db_lock, flags);

        return 0;
}
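
/*
 * Illustrative user-space sketch of receiving doorbells through this driver
 * (an editorial addition: RIO_ENABLE_DOORBELL_RANGE and RIO_SET_EVENT_MASK
 * are assumed UAPI ioctl names from <linux/rio_mport_cdev.h>; events are
 * delivered via read() on the device file descriptor):
 *
 *   struct rio_doorbell_filter dbf = {
 *           .rioid = RIO_INVALID_DESTID, // accept doorbells from any source
 *           .low = 0,
 *           .high = 0xffff,
 *   };
 *   unsigned int mask = RIO_DOORBELL;
 *   ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &dbf);
 *   ioctl(fd, RIO_SET_EVENT_MASK, &mask);
 *
 *   struct rio_event ev;
 *   if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *           printf("DB from %u, payload 0x%04x\n",
 *                  ev.u.doorbell.rioid, ev.u.doorbell.payload);
 */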
1468
1469static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
1470{
1471        list_del(&db_filter->data_node);
1472        list_del(&db_filter->priv_node);
1473        kfree(db_filter);
1474}
1475
1476static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
1477                                      void __user *arg)
1478{
1479        struct rio_mport_db_filter *db_filter;
1480        struct rio_doorbell_filter filter;
1481        unsigned long flags;
1482        int ret = -EINVAL;
1483
1484        if (copy_from_user(&filter, arg, sizeof(filter)))
1485                return -EFAULT;
1486
1487        if (filter.low > filter.high)
1488                return -EINVAL;
1489
1490        spin_lock_irqsave(&priv->md->db_lock, flags);
1491        list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
1492                if (db_filter->filter.rioid == filter.rioid &&
1493                    db_filter->filter.low == filter.low &&
1494                    db_filter->filter.high == filter.high) {
1495                        rio_mport_delete_db_filter(db_filter);
1496                        ret = 0;
1497                        break;
1498                }
1499        }
1500        spin_unlock_irqrestore(&priv->md->db_lock, flags);
1501
1502        if (!ret)
1503                rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);
1504
1505        return ret;
1506}
1507
1508static int rio_mport_match_pw(union rio_pw_msg *msg,
1509                              struct rio_pw_filter *filter)
1510{
1511        if ((msg->em.comptag & filter->mask) < filter->low ||
1512                (msg->em.comptag & filter->mask) > filter->high)
1513                return 0;
1514        return 1;
1515}
1516
1517static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
1518                                union rio_pw_msg *msg, int step)
1519{
1520        struct mport_dev *md = context;
1521        struct mport_cdev_priv *priv;
1522        struct rio_mport_pw_filter *pw_filter;
1523        struct rio_event event;
1524        int handled;
1525
1526        event.header = RIO_PORTWRITE;
1527        memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);
1528
1529        handled = 0;
1530        spin_lock(&md->pw_lock);
1531        list_for_each_entry(pw_filter, &md->portwrites, md_node) {
1532                if (rio_mport_match_pw(msg, &pw_filter->filter)) {
1533                        priv = pw_filter->priv;
1534                        rio_mport_add_event(priv, &event);
1535                        handled = 1;
1536                }
1537        }
1538        spin_unlock(&md->pw_lock);
1539
1540        if (!handled) {
1541                printk_ratelimited(KERN_WARNING DRV_NAME
1542                        ": mport%d received spurious PW from 0x%08x\n",
1543                        mport->id, msg->em.comptag);
1544        }
1545
1546        return 0;
1547}
1548
1549static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
1550                                   void __user *arg)
1551{
1552        struct mport_dev *md = priv->md;
1553        struct rio_mport_pw_filter *pw_filter;
1554        struct rio_pw_filter filter;
1555        unsigned long flags;
1556        int hadd = 0;
1557
1558        if (copy_from_user(&filter, arg, sizeof(filter)))
1559                return -EFAULT;
1560
1561        pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
1562        if (pw_filter == NULL)
1563                return -ENOMEM;
1564
1565        pw_filter->filter = filter;
1566        pw_filter->priv = priv;
1567        spin_lock_irqsave(&md->pw_lock, flags);
1568        if (list_empty(&md->portwrites))
1569                hadd = 1;
1570        list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
1571        list_add_tail(&pw_filter->md_node, &md->portwrites);
1572        spin_unlock_irqrestore(&md->pw_lock, flags);
1573
        if (hadd) {
                int ret;

                ret = rio_add_mport_pw_handler(md->mport, md,
                                               rio_mport_pw_handler);
                if (ret) {
                        dev_err(&md->dev,
                                "%s: failed to add IB_PW handler, err=%d\n",
                                __func__, ret);
                        /*
                         * Unpublish the filter queued above: leaving it on
                         * the lists with no handler armed would strand it.
                         */
                        spin_lock_irqsave(&md->pw_lock, flags);
                        rio_mport_delete_pw_filter(pw_filter);
                        spin_unlock_irqrestore(&md->pw_lock, flags);
                        return ret;
                }
                rio_pw_enable(md->mport, 1);
        }
1587
1588        return 0;
1589}
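
/*
 * Usage sketch (hypothetical userspace client): the first filter added
 * also arms the mport-level handler and enables port-write reception,
 * as implemented above. The mask/range values are illustrative.
 *
 *	struct rio_pw_filter pwf = {
 *		.mask = 0xffffffff,
 *		.low  = 0x80000000,
 *		.high = 0x8000ffff,
 *	};
 *	ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &pwf);
 */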
1590
1591static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
1592{
1593        list_del(&pw_filter->md_node);
1594        list_del(&pw_filter->priv_node);
1595        kfree(pw_filter);
1596}
1597
1598static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
1599                                     struct rio_pw_filter *b)
1600{
1601        if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
1602                return 1;
1603        return 0;
1604}
1605
1606static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
1607                                      void __user *arg)
1608{
1609        struct mport_dev *md = priv->md;
1610        struct rio_mport_pw_filter *pw_filter;
1611        struct rio_pw_filter filter;
1612        unsigned long flags;
1613        int ret = -EINVAL;
1614        int hdel = 0;
1615
1616        if (copy_from_user(&filter, arg, sizeof(filter)))
1617                return -EFAULT;
1618
1619        spin_lock_irqsave(&md->pw_lock, flags);
1620        list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
1621                if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
1622                        rio_mport_delete_pw_filter(pw_filter);
1623                        ret = 0;
1624                        break;
1625                }
1626        }
1627
1628        if (list_empty(&md->portwrites))
1629                hdel = 1;
1630        spin_unlock_irqrestore(&md->pw_lock, flags);
1631
1632        if (hdel) {
1633                rio_del_mport_pw_handler(md->mport, priv->md,
1634                                         rio_mport_pw_handler);
1635                rio_pw_enable(md->mport, 0);
1636        }
1637
1638        return ret;
1639}
1640
/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the kernel device object
 * once its last reference is dropped.
 */
1648static void rio_release_dev(struct device *dev)
1649{
1650        struct rio_dev *rdev;
1651
1652        rdev = to_rio_dev(dev);
1653        pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
1654        kfree(rdev);
}

1658static void rio_release_net(struct device *dev)
1659{
1660        struct rio_net *net;
1661
1662        net = to_rio_net(dev);
1663        rmcd_debug(RDEV, "net_%d", net->id);
1664        kfree(net);
}

1668/*
1669 * rio_mport_add_riodev - creates a kernel RIO device object
1670 *
1671 * Allocates a RIO device data structure and initializes required fields based
1672 * on device's configuration space contents.
1673 * If the device has switch capabilities, then a switch specific portion is
1674 * allocated and configured.
1675 */
1676static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
1677                                   void __user *arg)
1678{
1679        struct mport_dev *md = priv->md;
1680        struct rio_rdev_info dev_info;
1681        struct rio_dev *rdev;
1682        struct rio_switch *rswitch = NULL;
1683        struct rio_mport *mport;
1684        struct device *dev;
1685        size_t size;
1686        u32 rval;
1687        u32 swpinfo = 0;
1688        u16 destid;
1689        u8 hopcount;
1690        int err;
1691
1692        if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1693                return -EFAULT;
1694        dev_info.name[sizeof(dev_info.name) - 1] = '\0';
1695
1696        rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
1697                   dev_info.comptag, dev_info.destid, dev_info.hopcount);
1698
1699        dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
1700        if (dev) {
1701                rmcd_debug(RDEV, "device %s already exists", dev_info.name);
1702                put_device(dev);
1703                return -EEXIST;
1704        }
1705
1706        size = sizeof(*rdev);
1707        mport = md->mport;
1708        destid = dev_info.destid;
1709        hopcount = dev_info.hopcount;
1710
1711        if (rio_mport_read_config_32(mport, destid, hopcount,
1712                                     RIO_PEF_CAR, &rval))
1713                return -EIO;
1714
1715        if (rval & RIO_PEF_SWITCH) {
1716                rio_mport_read_config_32(mport, destid, hopcount,
1717                                         RIO_SWP_INFO_CAR, &swpinfo);
1718                size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
1719        }
1720
1721        rdev = kzalloc(size, GFP_KERNEL);
1722        if (rdev == NULL)
1723                return -ENOMEM;
1724
1725        if (mport->net == NULL) {
1726                struct rio_net *net;
1727
1728                net = rio_alloc_net(mport);
1729                if (!net) {
1730                        err = -ENOMEM;
1731                        rmcd_debug(RDEV, "failed to allocate net object");
1732                        goto cleanup;
1733                }
1734
1735                net->id = mport->id;
1736                net->hport = mport;
1737                dev_set_name(&net->dev, "rnet_%d", net->id);
1738                net->dev.parent = &mport->dev;
1739                net->dev.release = rio_release_net;
1740                err = rio_add_net(net);
1741                if (err) {
1742                        rmcd_debug(RDEV, "failed to register net, err=%d", err);
1743                        kfree(net);
1744                        goto cleanup;
1745                }
1746        }
1747
1748        rdev->net = mport->net;
1749        rdev->pef = rval;
1750        rdev->swpinfo = swpinfo;
1751        rio_mport_read_config_32(mport, destid, hopcount,
1752                                 RIO_DEV_ID_CAR, &rval);
1753        rdev->did = rval >> 16;
1754        rdev->vid = rval & 0xffff;
1755        rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
1756                                 &rdev->device_rev);
1757        rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
1758                                 &rval);
1759        rdev->asm_did = rval >> 16;
1760        rdev->asm_vid = rval & 0xffff;
1761        rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
1762                                 &rval);
1763        rdev->asm_rev = rval >> 16;
1764
1765        if (rdev->pef & RIO_PEF_EXT_FEATURES) {
1766                rdev->efptr = rval & 0xffff;
1767                rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
1768                                                hopcount, &rdev->phys_rmap);
1769
1770                rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
1771                                                hopcount, RIO_EFB_ERR_MGMNT);
1772        }
1773
1774        rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
1775                                 &rdev->src_ops);
1776        rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
1777                                 &rdev->dst_ops);
1778
1779        rdev->comp_tag = dev_info.comptag;
1780        rdev->destid = destid;
        /* hopcount is stored as specified by a caller, regardless of EP or SW */
1782        rdev->hopcount = hopcount;
1783
1784        if (rdev->pef & RIO_PEF_SWITCH) {
1785                rswitch = rdev->rswitch;
1786                rswitch->route_table = NULL;
1787        }
1788
1789        if (strlen(dev_info.name))
1790                dev_set_name(&rdev->dev, "%s", dev_info.name);
1791        else if (rdev->pef & RIO_PEF_SWITCH)
1792                dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
1793                             rdev->comp_tag & RIO_CTAG_UDEVID);
1794        else
1795                dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
1796                             rdev->comp_tag & RIO_CTAG_UDEVID);
1797
1798        INIT_LIST_HEAD(&rdev->net_list);
1799        rdev->dev.parent = &mport->net->dev;
1800        rio_attach_device(rdev);
1801        rdev->dev.release = rio_release_dev;
1802
1803        if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
1804                rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
1805                                   0, 0xffff);
1806        err = rio_add_device(rdev);
1807        if (err)
1808                goto cleanup;
1809        rio_dev_get(rdev);
1810
1811        return 0;
1812cleanup:
1813        kfree(rdev);
1814        return err;
1815}
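
/*
 * Usage sketch (hypothetical): a userspace enumerator registers an
 * endpoint it discovered at destid 0x01, hop count 0xff. Leaving
 * .name empty lets the code above generate the "<mport>:e:<tag>"
 * name; all values are illustrative.
 *
 *	struct rio_rdev_info info = {
 *		.destid   = 0x01,
 *		.hopcount = 0xff,
 *		.comptag  = 0x00010001,
 *		.name     = "",
 *	};
 *	ioctl(fd, RIO_DEV_ADD, &info);
 */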
1816
1817static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
1818{
1819        struct rio_rdev_info dev_info;
1820        struct rio_dev *rdev = NULL;
1821        struct device  *dev;
1822        struct rio_mport *mport;
1823        struct rio_net *net;
1824
1825        if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1826                return -EFAULT;
1827        dev_info.name[sizeof(dev_info.name) - 1] = '\0';
1828
1829        mport = priv->md->mport;
1830
1831        /* If device name is specified, removal by name has priority */
1832        if (strlen(dev_info.name)) {
1833                dev = bus_find_device_by_name(&rio_bus_type, NULL,
1834                                              dev_info.name);
1835                if (dev)
1836                        rdev = to_rio_dev(dev);
1837        } else {
1838                do {
1839                        rdev = rio_get_comptag(dev_info.comptag, rdev);
1840                        if (rdev && rdev->dev.parent == &mport->net->dev &&
1841                            rdev->destid == dev_info.destid &&
1842                            rdev->hopcount == dev_info.hopcount)
1843                                break;
1844                } while (rdev);
1845        }
1846
1847        if (!rdev) {
1848                rmcd_debug(RDEV,
1849                        "device name:%s ct:0x%x did:0x%x hc:0x%x not found",
1850                        dev_info.name, dev_info.comptag, dev_info.destid,
1851                        dev_info.hopcount);
1852                return -ENODEV;
1853        }
1854
1855        net = rdev->net;
1856        rio_dev_put(rdev);
1857        rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
1858
1859        if (list_empty(&net->devices)) {
1860                rio_free_net(net);
1861                mport->net = NULL;
1862        }
1863
1864        return 0;
1865}
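
/*
 * The matching removal (hypothetical usage): pass the same
 * rio_rdev_info; a non-empty .name takes priority, otherwise the
 * comptag/destid/hopcount triple drives the lookup above.
 *
 *	ioctl(fd, RIO_DEV_DEL, &info);
 */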
1866
1867/*
1868 * Mport cdev management
1869 */
1870
1871/*
1872 * mport_cdev_open() - Open character device (mport)
1873 */
1874static int mport_cdev_open(struct inode *inode, struct file *filp)
1875{
1876        int ret;
1877        int minor = iminor(inode);
1878        struct mport_dev *chdev;
1879        struct mport_cdev_priv *priv;
1880
1881        /* Test for valid device */
1882        if (minor >= RIO_MAX_MPORTS) {
1883                rmcd_error("Invalid minor device number");
1884                return -EINVAL;
1885        }
1886
1887        chdev = container_of(inode->i_cdev, struct mport_dev, cdev);
1888
1889        rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);
1890
1891        if (atomic_read(&chdev->active) == 0)
1892                return -ENODEV;
1893
1894        get_device(&chdev->dev);
1895
1896        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1897        if (!priv) {
1898                put_device(&chdev->dev);
1899                return -ENOMEM;
1900        }
1901
        priv->md = chdev;

        INIT_LIST_HEAD(&priv->db_filters);
        INIT_LIST_HEAD(&priv->pw_filters);
        spin_lock_init(&priv->fifo_lock);
        init_waitqueue_head(&priv->event_rx_wait);
        ret = kfifo_alloc(&priv->event_fifo,
                          sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
                          GFP_KERNEL);
        if (ret < 0) {
                dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
                put_device(&chdev->dev);
                kfree(priv);
                return -ENOMEM;
        }

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        INIT_LIST_HEAD(&priv->async_list);
        spin_lock_init(&priv->req_lock);
        mutex_init(&priv->dma_lock);
#endif

        /*
         * Publish priv on the file list only after the event FIFO exists,
         * so nothing ever sees a half-initialized client; this also keeps
         * the error path from leaking the list entry and the device
         * reference taken above.
         */
        mutex_lock(&chdev->file_mutex);
        list_add_tail(&priv->list, &chdev->file_list);
        mutex_unlock(&chdev->file_mutex);

        filp->private_data = priv;
        return 0;
}
1934
1935static int mport_cdev_fasync(int fd, struct file *filp, int mode)
1936{
1937        struct mport_cdev_priv *priv = filp->private_data;
1938
1939        return fasync_helper(fd, filp, mode, &priv->async_queue);
1940}
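
/*
 * Usage sketch (hypothetical): a client opting into SIGIO delivery
 * through the fasync hook above uses the standard fcntl sequence;
 * mport_cdev_kill_fasync() below raises SIGIO/POLL_HUP on removal.
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */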
1941
1942#ifdef CONFIG_RAPIDIO_DMA_ENGINE
1943static void mport_cdev_release_dma(struct file *filp)
1944{
1945        struct mport_cdev_priv *priv = filp->private_data;
1946        struct mport_dev *md;
1947        struct mport_dma_req *req, *req_next;
1948        unsigned long tmo = msecs_to_jiffies(dma_timeout);
1949        long wret;
1950        LIST_HEAD(list);
1951
1952        rmcd_debug(EXIT, "from filp=%p %s(%d)",
1953                   filp, current->comm, task_pid_nr(current));
1954
1955        if (!priv->dmach) {
1956                rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
1957                return;
1958        }
1959
1960        md = priv->md;
1961
1962        spin_lock(&priv->req_lock);
1963        if (!list_empty(&priv->async_list)) {
1964                rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
1965                           filp, current->comm, task_pid_nr(current));
1966                list_splice_init(&priv->async_list, &list);
1967        }
1968        spin_unlock(&priv->req_lock);
1969
1970        if (!list_empty(&list)) {
1971                rmcd_debug(EXIT, "temp list not empty");
1972                list_for_each_entry_safe(req, req_next, &list, node) {
1973                        rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
1974                                   req->filp, req->cookie,
1975                                   completion_done(&req->req_comp)?"yes":"no");
1976                        list_del(&req->node);
1977                        kref_put(&req->refcount, dma_req_free);
1978                }
1979        }
1980
1981        put_dma_channel(priv);
1982        wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
1983
1984        if (wret <= 0) {
1985                rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
1986                        current->comm, task_pid_nr(current), wret);
1987        }
1988
1989        if (priv->dmach != priv->md->dma_chan) {
1990                rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
1991                           filp, current->comm, task_pid_nr(current));
1992                rio_release_dma(priv->dmach);
1993        } else {
1994                rmcd_debug(EXIT, "Adjust default DMA channel refcount");
1995                kref_put(&md->dma_ref, mport_release_def_dma);
1996        }
1997
1998        priv->dmach = NULL;
1999}
2000#else
#define mport_cdev_release_dma(filp) do {} while (0)
2002#endif
2003
2004/*
2005 * mport_cdev_release() - Release character device
2006 */
2007static int mport_cdev_release(struct inode *inode, struct file *filp)
2008{
2009        struct mport_cdev_priv *priv = filp->private_data;
2010        struct mport_dev *chdev;
2011        struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
2012        struct rio_mport_db_filter *db_filter, *db_filter_next;
2013        struct rio_mport_mapping *map, *_map;
2014        unsigned long flags;
2015
2016        rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
2017
2018        chdev = priv->md;
2019        mport_cdev_release_dma(filp);
2020
2021        priv->event_mask = 0;
2022
2023        spin_lock_irqsave(&chdev->pw_lock, flags);
2024        if (!list_empty(&priv->pw_filters)) {
2025                list_for_each_entry_safe(pw_filter, pw_filter_next,
2026                                         &priv->pw_filters, priv_node)
2027                        rio_mport_delete_pw_filter(pw_filter);
2028        }
2029        spin_unlock_irqrestore(&chdev->pw_lock, flags);
2030
2031        spin_lock_irqsave(&chdev->db_lock, flags);
2032        list_for_each_entry_safe(db_filter, db_filter_next,
2033                                 &priv->db_filters, priv_node) {
2034                rio_mport_delete_db_filter(db_filter);
2035        }
2036        spin_unlock_irqrestore(&chdev->db_lock, flags);
2037
2038        kfifo_free(&priv->event_fifo);
2039
2040        mutex_lock(&chdev->buf_mutex);
2041        list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
2042                if (map->filp == filp) {
2043                        rmcd_debug(EXIT, "release mapping %p filp=%p",
2044                                   map->virt_addr, filp);
2045                        kref_put(&map->ref, mport_release_mapping);
2046                }
2047        }
2048        mutex_unlock(&chdev->buf_mutex);
2049
2050        mport_cdev_fasync(-1, filp, 0);
2051        filp->private_data = NULL;
2052        mutex_lock(&chdev->file_mutex);
2053        list_del(&priv->list);
2054        mutex_unlock(&chdev->file_mutex);
2055        put_device(&chdev->dev);
2056        kfree(priv);
2057        return 0;
2058}
2059
2060/*
2061 * mport_cdev_ioctl() - IOCTLs for character device
2062 */
2063static long mport_cdev_ioctl(struct file *filp,
2064                unsigned int cmd, unsigned long arg)
2065{
2066        int err = -EINVAL;
2067        struct mport_cdev_priv *data = filp->private_data;
2068        struct mport_dev *md = data->md;
2069
2070        if (atomic_read(&md->active) == 0)
2071                return -ENODEV;
2072
2073        switch (cmd) {
2074        case RIO_MPORT_MAINT_READ_LOCAL:
2075                return rio_mport_maint_rd(data, (void __user *)arg, 1);
2076        case RIO_MPORT_MAINT_WRITE_LOCAL:
2077                return rio_mport_maint_wr(data, (void __user *)arg, 1);
2078        case RIO_MPORT_MAINT_READ_REMOTE:
2079                return rio_mport_maint_rd(data, (void __user *)arg, 0);
2080        case RIO_MPORT_MAINT_WRITE_REMOTE:
2081                return rio_mport_maint_wr(data, (void __user *)arg, 0);
2082        case RIO_MPORT_MAINT_HDID_SET:
2083                return maint_hdid_set(data, (void __user *)arg);
2084        case RIO_MPORT_MAINT_COMPTAG_SET:
2085                return maint_comptag_set(data, (void __user *)arg);
2086        case RIO_MPORT_MAINT_PORT_IDX_GET:
2087                return maint_port_idx_get(data, (void __user *)arg);
2088        case RIO_MPORT_GET_PROPERTIES:
2089                md->properties.hdid = md->mport->host_deviceid;
2090                if (copy_to_user((void __user *)arg, &(md->properties),
2091                                 sizeof(md->properties)))
2092                        return -EFAULT;
2093                return 0;
2094        case RIO_ENABLE_DOORBELL_RANGE:
2095                return rio_mport_add_db_filter(data, (void __user *)arg);
2096        case RIO_DISABLE_DOORBELL_RANGE:
2097                return rio_mport_remove_db_filter(data, (void __user *)arg);
2098        case RIO_ENABLE_PORTWRITE_RANGE:
2099                return rio_mport_add_pw_filter(data, (void __user *)arg);
2100        case RIO_DISABLE_PORTWRITE_RANGE:
2101                return rio_mport_remove_pw_filter(data, (void __user *)arg);
2102        case RIO_SET_EVENT_MASK:
2103                data->event_mask = (u32)arg;
2104                return 0;
2105        case RIO_GET_EVENT_MASK:
2106                if (copy_to_user((void __user *)arg, &data->event_mask,
2107                                    sizeof(u32)))
2108                        return -EFAULT;
2109                return 0;
2110        case RIO_MAP_OUTBOUND:
2111                return rio_mport_obw_map(filp, (void __user *)arg);
2112        case RIO_MAP_INBOUND:
2113                return rio_mport_map_inbound(filp, (void __user *)arg);
2114        case RIO_UNMAP_OUTBOUND:
2115                return rio_mport_obw_free(filp, (void __user *)arg);
2116        case RIO_UNMAP_INBOUND:
2117                return rio_mport_inbound_free(filp, (void __user *)arg);
2118        case RIO_ALLOC_DMA:
2119                return rio_mport_alloc_dma(filp, (void __user *)arg);
2120        case RIO_FREE_DMA:
2121                return rio_mport_free_dma(filp, (void __user *)arg);
2122        case RIO_WAIT_FOR_ASYNC:
2123                return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
2124        case RIO_TRANSFER:
2125                return rio_mport_transfer_ioctl(filp, (void __user *)arg);
2126        case RIO_DEV_ADD:
2127                return rio_mport_add_riodev(data, (void __user *)arg);
2128        case RIO_DEV_DEL:
2129                return rio_mport_del_riodev(data, (void __user *)arg);
2130        default:
2131                break;
2132        }
2133
2134        return err;
2135}
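
/*
 * Usage sketch (hypothetical): select which event types mport_read()
 * below will queue, and query the mport properties. This assumes the
 * uapi header's struct rio_mport_properties and that RIO_DOORBELL and
 * RIO_PORTWRITE double as event-mask bits.
 *
 *	struct rio_mport_properties props;
 *
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL | RIO_PORTWRITE);
 *	ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props);
 */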
2136
2137/*
2138 * mport_release_mapping - free mapping resources and info structure
2139 * @ref: a pointer to the kref within struct rio_mport_mapping
2140 *
2141 * NOTE: Shall be called while holding buf_mutex.
2142 */
2143static void mport_release_mapping(struct kref *ref)
2144{
2145        struct rio_mport_mapping *map =
2146                        container_of(ref, struct rio_mport_mapping, ref);
2147        struct rio_mport *mport = map->md->mport;
2148
2149        rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
2150                   map->dir, map->virt_addr,
2151                   &map->phys_addr, mport->name);
2152
2153        list_del(&map->node);
2154
2155        switch (map->dir) {
2156        case MAP_INBOUND:
2157                rio_unmap_inb_region(mport, map->phys_addr);
2158                fallthrough;
2159        case MAP_DMA:
2160                dma_free_coherent(mport->dev.parent, map->size,
2161                                  map->virt_addr, map->phys_addr);
2162                break;
2163        case MAP_OUTBOUND:
2164                rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
2165                break;
2166        }
2167        kfree(map);
2168}
2169
2170static void mport_mm_open(struct vm_area_struct *vma)
2171{
2172        struct rio_mport_mapping *map = vma->vm_private_data;
2173
2174        rmcd_debug(MMAP, "%pad", &map->phys_addr);
2175        kref_get(&map->ref);
2176}
2177
2178static void mport_mm_close(struct vm_area_struct *vma)
2179{
2180        struct rio_mport_mapping *map = vma->vm_private_data;
2181
2182        rmcd_debug(MMAP, "%pad", &map->phys_addr);
2183        mutex_lock(&map->md->buf_mutex);
2184        kref_put(&map->ref, mport_release_mapping);
2185        mutex_unlock(&map->md->buf_mutex);
2186}
2187
2188static const struct vm_operations_struct vm_ops = {
2189        .open = mport_mm_open,
2190        .close = mport_mm_close,
2191};
2192
2193static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
2194{
2195        struct mport_cdev_priv *priv = filp->private_data;
2196        struct mport_dev *md;
2197        size_t size = vma->vm_end - vma->vm_start;
2198        dma_addr_t baddr;
2199        unsigned long offset;
2200        int found = 0, ret;
2201        struct rio_mport_mapping *map;
2202
2203        rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
2204                   (unsigned int)size, vma->vm_pgoff);
2205
2206        md = priv->md;
2207        baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
2208
2209        mutex_lock(&md->buf_mutex);
2210        list_for_each_entry(map, &md->mappings, node) {
2211                if (baddr >= map->phys_addr &&
2212                    baddr < (map->phys_addr + map->size)) {
2213                        found = 1;
2214                        break;
2215                }
2216        }
2217        mutex_unlock(&md->buf_mutex);
2218
2219        if (!found)
2220                return -ENOMEM;
2221
2222        offset = baddr - map->phys_addr;
2223
2224        if (size + offset > map->size)
2225                return -EINVAL;
2226
2227        vma->vm_pgoff = offset >> PAGE_SHIFT;
2228        rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
2229
2230        if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
2231                ret = dma_mmap_coherent(md->mport->dev.parent, vma,
2232                                map->virt_addr, map->phys_addr, map->size);
2233        else if (map->dir == MAP_OUTBOUND) {
2234                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2235                ret = vm_iomap_memory(vma, map->phys_addr, map->size);
2236        } else {
2237                rmcd_error("Attempt to mmap unsupported mapping type");
2238                ret = -EIO;
2239        }
2240
2241        if (!ret) {
2242                vma->vm_private_data = map;
2243                vma->vm_ops = &vm_ops;
2244                mport_mm_open(vma);
2245        } else {
2246                rmcd_error("MMAP exit with err=%d", ret);
2247        }
2248
2249        return ret;
2250}
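
/*
 * Usage sketch (hypothetical): allocate a DMA buffer, then hand its
 * dma_handle to mmap() as the offset so the lookup above can resolve
 * the mapping by physical address. Field names follow the uapi
 * struct rio_dma_mem as understood here; error handling is omitted.
 *
 *	struct rio_dma_mem dbuf = { .length = 0x10000 };
 *	void *p;
 *
 *	ioctl(fd, RIO_ALLOC_DMA, &dbuf);
 *	p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED, fd, dbuf.dma_handle);
 */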
2251
2252static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
2253{
2254        struct mport_cdev_priv *priv = filp->private_data;
2255
2256        poll_wait(filp, &priv->event_rx_wait, wait);
2257        if (kfifo_len(&priv->event_fifo))
2258                return EPOLLIN | EPOLLRDNORM;
2259
2260        return 0;
2261}
2262
2263static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
2264                        loff_t *ppos)
2265{
2266        struct mport_cdev_priv *priv = filp->private_data;
2267        int copied;
2268        ssize_t ret;
2269
2270        if (!count)
2271                return 0;
2272
2273        if (kfifo_is_empty(&priv->event_fifo) &&
2274            (filp->f_flags & O_NONBLOCK))
2275                return -EAGAIN;
2276
2277        if (count % sizeof(struct rio_event))
2278                return -EINVAL;
2279
2280        ret = wait_event_interruptible(priv->event_rx_wait,
2281                                        kfifo_len(&priv->event_fifo) != 0);
2282        if (ret)
2283                return ret;
2284
        while (ret < count) {
                if (kfifo_to_user(&priv->event_fifo, buf,
                      sizeof(struct rio_event), &copied))
                        return -EFAULT;
                /*
                 * Stop when the FIFO drains instead of spinning on empty
                 * copies; return however many whole events were delivered.
                 */
                if (!copied)
                        break;
                ret += copied;
                buf += copied;
        }
2292
2293        return ret;
2294}
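
/*
 * Usage sketch (hypothetical): block in poll(2) until an event is
 * queued, then read whole rio_event records; partial sizes are
 * rejected with -EINVAL above.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct rio_event ev;
 *
 *	poll(&pfd, 1, -1);
 *	read(fd, &ev, sizeof(ev));
 *	if (ev.header == RIO_DOORBELL)
 *		printf("doorbell 0x%04x from %u\n",
 *		       ev.u.doorbell.payload, ev.u.doorbell.rioid);
 */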
2295
2296static ssize_t mport_write(struct file *filp, const char __user *buf,
2297                         size_t count, loff_t *ppos)
2298{
2299        struct mport_cdev_priv *priv = filp->private_data;
2300        struct rio_mport *mport = priv->md->mport;
2301        struct rio_event event;
2302        int len, ret;
2303
2304        if (!count)
2305                return 0;
2306
2307        if (count % sizeof(event))
2308                return -EINVAL;
2309
2310        len = 0;
2311        while ((count - len) >= (int)sizeof(event)) {
2312                if (copy_from_user(&event, buf, sizeof(event)))
2313                        return -EFAULT;
2314
2315                if (event.header != RIO_DOORBELL)
2316                        return -EINVAL;
2317
2318                ret = rio_mport_send_doorbell(mport,
2319                                              event.u.doorbell.rioid,
2320                                              event.u.doorbell.payload);
2321                if (ret < 0)
2322                        return ret;
2323
2324                len += sizeof(event);
2325                buf += sizeof(event);
2326        }
2327
2328        return len;
2329}
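
/*
 * The outbound direction (hypothetical usage): writing one rio_event
 * with header RIO_DOORBELL sends a doorbell to the addressed device,
 * as implemented above. The destination and payload are illustrative.
 *
 *	struct rio_event ev = {
 *		.header = RIO_DOORBELL,
 *		.u.doorbell = { .rioid = 0x01, .payload = 0x5a5a },
 *	};
 *	write(fd, &ev, sizeof(ev));
 */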
2330
2331static const struct file_operations mport_fops = {
2332        .owner          = THIS_MODULE,
2333        .open           = mport_cdev_open,
2334        .release        = mport_cdev_release,
2335        .poll           = mport_cdev_poll,
2336        .read           = mport_read,
2337        .write          = mport_write,
2338        .mmap           = mport_cdev_mmap,
2339        .fasync         = mport_cdev_fasync,
2340        .unlocked_ioctl = mport_cdev_ioctl
2341};
2342
2343/*
2344 * Character device management
2345 */
2346
2347static void mport_device_release(struct device *dev)
2348{
2349        struct mport_dev *md;
2350
2351        rmcd_debug(EXIT, "%s", dev_name(dev));
2352        md = container_of(dev, struct mport_dev, dev);
2353        kfree(md);
2354}
2355
2356/*
2357 * mport_cdev_add() - Create mport_dev from rio_mport
2358 * @mport:      RapidIO master port
2359 */
2360static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2361{
2362        int ret = 0;
2363        struct mport_dev *md;
2364        struct rio_mport_attr attr;
2365
2366        md = kzalloc(sizeof(*md), GFP_KERNEL);
2367        if (!md) {
                rmcd_error("Unable to allocate a device object");
2369                return NULL;
2370        }
2371
2372        md->mport = mport;
2373        mutex_init(&md->buf_mutex);
2374        mutex_init(&md->file_mutex);
2375        INIT_LIST_HEAD(&md->file_list);
2376
2377        device_initialize(&md->dev);
2378        md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
2379        md->dev.class = dev_class;
2380        md->dev.parent = &mport->dev;
2381        md->dev.release = mport_device_release;
2382        dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
2383        atomic_set(&md->active, 1);
2384
2385        cdev_init(&md->cdev, &mport_fops);
2386        md->cdev.owner = THIS_MODULE;
2387
2388        INIT_LIST_HEAD(&md->doorbells);
2389        spin_lock_init(&md->db_lock);
2390        INIT_LIST_HEAD(&md->portwrites);
2391        spin_lock_init(&md->pw_lock);
2392        INIT_LIST_HEAD(&md->mappings);
2393
2394        md->properties.id = mport->id;
2395        md->properties.sys_size = mport->sys_size;
2396        md->properties.hdid = mport->host_deviceid;
2397        md->properties.index = mport->index;
2398
2399        /* The transfer_mode property will be returned through mport query
2400         * interface
2401         */
2402#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2403        md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2404#else
2405        md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
2406#endif
2407
2408        ret = cdev_device_add(&md->cdev, &md->dev);
2409        if (ret) {
2410                rmcd_error("Failed to register mport %d (err=%d)",
2411                       mport->id, ret);
2412                goto err_cdev;
2413        }
2414        ret = rio_query_mport(mport, &attr);
2415        if (!ret) {
2416                md->properties.flags = attr.flags;
2417                md->properties.link_speed = attr.link_speed;
2418                md->properties.link_width = attr.link_width;
2419                md->properties.dma_max_sge = attr.dma_max_sge;
2420                md->properties.dma_max_size = attr.dma_max_size;
2421                md->properties.dma_align = attr.dma_align;
2422                md->properties.cap_sys_size = 0;
2423                md->properties.cap_transfer_mode = 0;
2424                md->properties.cap_addr_size = 0;
        } else {
                pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
                        mport->name, MAJOR(dev_number), mport->id);
        }
2428
2429        mutex_lock(&mport_devs_lock);
2430        list_add_tail(&md->node, &mport_devs);
2431        mutex_unlock(&mport_devs_lock);
2432
2433        pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
2434                mport->name, MAJOR(dev_number), mport->id);
2435
2436        return md;
2437
2438err_cdev:
2439        put_device(&md->dev);
2440        return NULL;
2441}
2442
2443/*
2444 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
2445 *                              associated DMA channels.
2446 */
2447static void mport_cdev_terminate_dma(struct mport_dev *md)
2448{
2449#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2450        struct mport_cdev_priv *client;
2451
2452        rmcd_debug(DMA, "%s", dev_name(&md->dev));
2453
2454        mutex_lock(&md->file_mutex);
2455        list_for_each_entry(client, &md->file_list, list) {
2456                if (client->dmach) {
2457                        dmaengine_terminate_all(client->dmach);
2458                        rio_release_dma(client->dmach);
2459                }
2460        }
2461        mutex_unlock(&md->file_mutex);
2462
2463        if (md->dma_chan) {
2464                dmaengine_terminate_all(md->dma_chan);
2465                rio_release_dma(md->dma_chan);
2466                md->dma_chan = NULL;
2467        }
2468#endif
}

2472/*
2473 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2474 *                            mport_cdev files.
2475 */
2476static int mport_cdev_kill_fasync(struct mport_dev *md)
2477{
2478        unsigned int files = 0;
2479        struct mport_cdev_priv *client;
2480
2481        mutex_lock(&md->file_mutex);
2482        list_for_each_entry(client, &md->file_list, list) {
2483                if (client->async_queue)
2484                        kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
2485                files++;
2486        }
2487        mutex_unlock(&md->file_mutex);
2488        return files;
2489}
2490
2491/*
2492 * mport_cdev_remove() - Remove mport character device
2493 * @dev:        Mport device to remove
2494 */
2495static void mport_cdev_remove(struct mport_dev *md)
2496{
2497        struct rio_mport_mapping *map, *_map;
2498
2499        rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
2500        atomic_set(&md->active, 0);
2501        mport_cdev_terminate_dma(md);
2502        rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
2503        cdev_device_del(&md->cdev, &md->dev);
2504        mport_cdev_kill_fasync(md);
2505
2506        /* TODO: do we need to give clients some time to close file
2507         * descriptors? Simple wait for XX, or kref?
2508         */
2509
2510        /*
2511         * Release DMA buffers allocated for the mport device.
2512         * Disable associated inbound Rapidio requests mapping if applicable.
2513         */
2514        mutex_lock(&md->buf_mutex);
2515        list_for_each_entry_safe(map, _map, &md->mappings, node) {
2516                kref_put(&map->ref, mport_release_mapping);
2517        }
2518        mutex_unlock(&md->buf_mutex);
2519
        if (!list_empty(&md->mappings))
                rmcd_warn("%s pending mappings on removal",
                          md->mport->name);
2523
2524        rio_release_inb_dbell(md->mport, 0, 0x0fff);
2525
2526        put_device(&md->dev);
2527}
2528
2529/*
2530 * RIO rio_mport_interface driver
2531 */
2532
2533/*
2534 * mport_add_mport() - Add rio_mport from LDM device struct
2535 * @dev:                Linux device model struct
2536 * @class_intf: Linux class_interface
2537 */
2538static int mport_add_mport(struct device *dev,
2539                struct class_interface *class_intf)
2540{
2541        struct rio_mport *mport = NULL;
2542        struct mport_dev *chdev = NULL;
2543
2544        mport = to_rio_mport(dev);
2545        if (!mport)
2546                return -ENODEV;
2547
2548        chdev = mport_cdev_add(mport);
2549        if (!chdev)
2550                return -ENODEV;
2551
2552        return 0;
2553}
2554
2555/*
2556 * mport_remove_mport() - Remove rio_mport from global list
2557 * TODO remove device from global mport_dev list
2558 */
2559static void mport_remove_mport(struct device *dev,
2560                struct class_interface *class_intf)
2561{
2562        struct rio_mport *mport = NULL;
2563        struct mport_dev *chdev;
2564        int found = 0;
2565
2566        mport = to_rio_mport(dev);
2567        rmcd_debug(EXIT, "Remove %s", mport->name);
2568
2569        mutex_lock(&mport_devs_lock);
2570        list_for_each_entry(chdev, &mport_devs, node) {
2571                if (chdev->mport->id == mport->id) {
2572                        atomic_set(&chdev->active, 0);
2573                        list_del(&chdev->node);
2574                        found = 1;
2575                        break;
2576                }
2577        }
2578        mutex_unlock(&mport_devs_lock);
2579
2580        if (found)
2581                mport_cdev_remove(chdev);
2582}
2583
2584/* the rio_mport_interface is used to handle local mport devices */
2585static struct class_interface rio_mport_interface __refdata = {
2586        .class          = &rio_mport_class,
2587        .add_dev        = mport_add_mport,
2588        .remove_dev     = mport_remove_mport,
2589};
2590
2591/*
2592 * Linux kernel module
2593 */
2594
2595/*
2596 * mport_init - Driver module loading
2597 */
2598static int __init mport_init(void)
2599{
2600        int ret;
2601
2602        /* Create device class needed by udev */
2603        dev_class = class_create(THIS_MODULE, DRV_NAME);
2604        if (IS_ERR(dev_class)) {
2605                rmcd_error("Unable to create " DRV_NAME " class");
2606                return PTR_ERR(dev_class);
2607        }
2608
2609        ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
2610        if (ret < 0)
2611                goto err_chr;
2612
2613        rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
2614
2615        /* Register to rio_mport_interface */
2616        ret = class_interface_register(&rio_mport_interface);
2617        if (ret) {
2618                rmcd_error("class_interface_register() failed, err=%d", ret);
2619                goto err_cli;
2620        }
2621
2622        return 0;
2623
2624err_cli:
2625        unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2626err_chr:
2627        class_destroy(dev_class);
2628        return ret;
2629}
2630
/*
2632 * mport_exit - Driver module unloading
2633 */
2634static void __exit mport_exit(void)
2635{
2636        class_interface_unregister(&rio_mport_interface);
2637        class_destroy(dev_class);
2638        unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2639}
2640
2641module_init(mport_init);
2642module_exit(mport_exit);
2643