linux/drivers/iio/buffer/industrialio-buffer-dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample, whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than can be sustained
 * with the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data from
 *    the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A dead block has been marked to be freed. It might still be owned by
 *    either the application or the DMA controller at the moment, but once the
 *    current owner is done processing it, the block will be freed instead of
 *    being put back on either the incoming or the outgoing queue.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to contain the
 * actual number of bytes in the buffer. Typically this will be equal to the
 * size of the block, but if the DMA hardware has certain alignment requirements
 * for the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
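
/*
 * Illustrative sketch (not part of the original file): a minimal submit()
 * implementation as a converter driver might provide it. The my_driver_*
 * helpers are hypothetical; the only requirements imposed by this module are
 * that submit() starts a transfer into the block's memory and that
 * iio_dma_buffer_block_done() is eventually called for the block (see the
 * completion handler sketch further below).
 *
 *	static int my_driver_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct my_driver_state *st = my_driver_state_from_queue(queue);
 *
 *		// Program the DMA controller to capture up to block->size
 *		// bytes into the block's DMA-coherent memory at
 *		// block->phys_addr, passing the block as completion context.
 *		return my_driver_start_dma(st, block->phys_addr, block->size,
 *					   block);
 *	}
 */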

static void iio_buffer_block_release(struct kref *kref)
{
        struct iio_dma_buffer_block *block = container_of(kref,
                struct iio_dma_buffer_block, kref);

        WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

        dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
                                        block->vaddr, block->phys_addr);

        iio_buffer_put(&block->queue->buffer);
        kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
        kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
        kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
        struct iio_dma_buffer_block *block, *_block;
        LIST_HEAD(block_list);

        spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
        list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
        spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

        list_for_each_entry_safe(block, _block, &block_list, head)
                iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
        struct iio_dma_buffer_block *block;
        unsigned long flags;

        block = container_of(kref, struct iio_dma_buffer_block, kref);

        spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
        list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
        spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

        schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
        kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
        return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
        struct iio_dma_buffer_queue *queue, size_t size)
{
        struct iio_dma_buffer_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                return NULL;

        block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
                &block->phys_addr, GFP_KERNEL);
        if (!block->vaddr) {
                kfree(block);
                return NULL;
        }

        block->size = size;
        block->state = IIO_BLOCK_STATE_DEQUEUED;
        block->queue = queue;
        INIT_LIST_HEAD(&block->head);
        kref_init(&block->kref);

        iio_buffer_get(&queue->buffer);

        return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
        struct iio_dma_buffer_queue *queue = block->queue;

        /*
         * If the buffer has already been freed by the application the block
         * has been marked as dead; in that case just drop the reference
         * instead of placing the block on the outgoing queue.
         */
        if (block->state != IIO_BLOCK_STATE_DEAD) {
                block->state = IIO_BLOCK_STATE_DONE;
                list_add_tail(&block->head, &queue->outgoing);
        }
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
        struct iio_dma_buffer_queue *queue = block->queue;
        unsigned long flags;

        spin_lock_irqsave(&queue->list_lock, flags);
        _iio_dma_buffer_block_done(block);
        spin_unlock_irqrestore(&queue->list_lock, flags);

        iio_buffer_block_put_atomic(block);
        wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
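
/*
 * Illustrative sketch (not part of the original file): a DMA completion
 * handler as a driver might implement it. The callback signature and the
 * my_driver_ prefix are assumptions; the relevant point is that bytes_used is
 * set to the number of valid bytes (a multiple of bytes_per_datum, or 0 if no
 * data was transferred) before iio_dma_buffer_block_done() is called.
 *
 *	static void my_driver_dma_complete(void *data, unsigned int bytes_done)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		block->bytes_used = bytes_done;
 *		iio_dma_buffer_block_done(block);
 *	}
 */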

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
        struct list_head *list)
{
        struct iio_dma_buffer_block *block, *_block;
        unsigned long flags;

        spin_lock_irqsave(&queue->list_lock, flags);
        list_for_each_entry_safe(block, _block, list, head) {
                list_del(&block->head);
                block->bytes_used = 0;
                _iio_dma_buffer_block_done(block);
                iio_buffer_block_put_atomic(block);
        }
        spin_unlock_irqrestore(&queue->list_lock, flags);

        wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
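
/*
 * Illustrative sketch (not part of the original file): an abort() callback
 * for a controller that keeps the blocks it is currently processing on a
 * driver-private list. The my_driver_* names and the active_blocks list are
 * hypothetical; the point is that every block that will no longer complete is
 * handed back to the queue in a single call after the DMA has been stopped.
 *
 *	static void my_driver_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct my_driver_state *st = my_driver_state_from_queue(queue);
 *
 *		my_driver_stop_dma(st);
 *		iio_dma_buffer_block_list_abort(queue, &st->active_blocks);
 *	}
 */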

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
        /*
         * If the core owns the block it can be re-used. This should be the
         * default case when enabling the buffer, unless the DMA controller does
         * not support abort and has not given back the block yet.
         */
        switch (block->state) {
        case IIO_BLOCK_STATE_DEQUEUED:
        case IIO_BLOCK_STATE_QUEUED:
        case IIO_BLOCK_STATE_DONE:
                return true;
        default:
                return false;
        }
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update() callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
        struct iio_dma_buffer_block *block;
        bool try_reuse = false;
        size_t size;
        int ret = 0;
        int i;

        /*
         * Split the buffer into two even parts. This is used as a double
         * buffering scheme with usually one block at a time being used by the
         * DMA and the other one by the application.
         */
        size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
                queue->buffer.length, 2);

        mutex_lock(&queue->lock);

        /* Allocations are page aligned */
        if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
                try_reuse = true;

        queue->fileio.block_size = size;
        queue->fileio.active_block = NULL;

        spin_lock_irq(&queue->list_lock);
        for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                block = queue->fileio.blocks[i];

                /* If we can't re-use it free it */
                if (block && (!iio_dma_block_reusable(block) || !try_reuse))
                        block->state = IIO_BLOCK_STATE_DEAD;
        }

        /*
         * At this point all blocks are either owned by the core or marked as
         * dead. This means we can reset the lists without having to fear
         * corruption.
         */
        INIT_LIST_HEAD(&queue->outgoing);
        spin_unlock_irq(&queue->list_lock);

        INIT_LIST_HEAD(&queue->incoming);

        for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                if (queue->fileio.blocks[i]) {
                        block = queue->fileio.blocks[i];
                        if (block->state == IIO_BLOCK_STATE_DEAD) {
                                /* Could not reuse it */
                                iio_buffer_block_put(block);
                                block = NULL;
                        } else {
                                block->size = size;
                        }
                } else {
                        block = NULL;
                }

                if (!block) {
                        block = iio_dma_buffer_alloc_block(queue, size);
                        if (!block) {
                                ret = -ENOMEM;
                                goto out_unlock;
                        }
                        queue->fileio.blocks[i] = block;
                }

                block->state = IIO_BLOCK_STATE_QUEUED;
                list_add_tail(&block->head, &queue->incoming);
        }

out_unlock:
        mutex_unlock(&queue->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
        struct iio_dma_buffer_block *block)
{
        int ret;

        /*
         * If the hardware has already been removed we put the block into
         * limbo. It will neither be on the incoming nor outgoing list, nor will
         * it ever complete. It will just wait to be freed eventually.
         */
        if (!queue->ops)
                return;

        block->state = IIO_BLOCK_STATE_ACTIVE;
        iio_buffer_block_get(block);
        ret = queue->ops->submit(queue, block);
        if (ret) {
                /*
                 * This is a bit of a problem and there is not much we can do
                 * other than wait for the buffer to be disabled and re-enabled
                 * and try again. But it should not really happen unless we run
                 * out of memory or something similar.
                 *
                 * TODO: Implement support in the IIO core to allow buffers to
                 * notify consumers that something went wrong and the buffer
                 * should be disabled.
                 */
                iio_buffer_block_put(block);
        }
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be used as the iio_buffer_access_funcs enable
 * callback.
 *
 * This will submit all queued blocks to the DMA controller and start the DMA
 * transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
        struct iio_dev *indio_dev)
{
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
        struct iio_dma_buffer_block *block, *_block;

        mutex_lock(&queue->lock);
        queue->active = true;
        list_for_each_entry_safe(block, _block, &queue->incoming, head) {
                list_del(&block->head);
                iio_dma_buffer_submit_block(queue, block);
        }
        mutex_unlock(&queue->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be used as the iio_buffer_access_funcs disable
 * callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
        struct iio_dev *indio_dev)
{
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

        mutex_lock(&queue->lock);
        queue->active = false;

        if (queue->ops && queue->ops->abort)
                queue->ops->abort(queue);
        mutex_unlock(&queue->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
        struct iio_dma_buffer_block *block)
{
        if (block->state == IIO_BLOCK_STATE_DEAD) {
                iio_buffer_block_put(block);
        } else if (queue->active) {
                iio_dma_buffer_submit_block(queue, block);
        } else {
                block->state = IIO_BLOCK_STATE_QUEUED;
                list_add_tail(&block->head, &queue->incoming);
        }
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
        struct iio_dma_buffer_queue *queue)
{
        struct iio_dma_buffer_block *block;

        spin_lock_irq(&queue->list_lock);
        block = list_first_entry_or_null(&queue->outgoing, struct
                iio_dma_buffer_block, head);
        if (block != NULL) {
                list_del(&block->head);
                block->state = IIO_BLOCK_STATE_DEQUEUED;
        }
        spin_unlock_irq(&queue->list_lock);

        return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
        char __user *user_buffer)
{
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
        struct iio_dma_buffer_block *block;
        int ret;

        if (n < buffer->bytes_per_datum)
                return -EINVAL;

        mutex_lock(&queue->lock);

        if (!queue->fileio.active_block) {
                block = iio_dma_buffer_dequeue(queue);
                if (block == NULL) {
                        ret = 0;
                        goto out_unlock;
                }
                queue->fileio.pos = 0;
                queue->fileio.active_block = block;
        } else {
                block = queue->fileio.active_block;
        }

        n = rounddown(n, buffer->bytes_per_datum);
        if (n > block->bytes_used - queue->fileio.pos)
                n = block->bytes_used - queue->fileio.pos;

        if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
                ret = -EFAULT;
                goto out_unlock;
        }

        queue->fileio.pos += n;

        if (queue->fileio.pos == block->bytes_used) {
                queue->fileio.active_block = NULL;
                iio_dma_buffer_enqueue(queue, block);
        }

        ret = n;

out_unlock:
        mutex_unlock(&queue->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
        struct iio_dma_buffer_block *block;
        size_t data_available = 0;

        /*
         * For counting the available bytes we'll use the size of the block,
         * not the number of actual bytes available in the block. Otherwise it
         * is possible that we end up with a value that is lower than the
         * watermark but won't increase since all blocks are in use.
         */

        mutex_lock(&queue->lock);
        if (queue->fileio.active_block)
                data_available += queue->fileio.active_block->size;

        spin_lock_irq(&queue->list_lock);
        list_for_each_entry(block, &queue->outgoing, head)
                data_available += block->size;
        spin_unlock_irq(&queue->list_lock);
        mutex_unlock(&queue->lock);

        return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
        buffer->bytes_per_datum = bpd;

        return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
        /* Avoid an invalid state */
        if (length < 2)
                length = 2;
        buffer->length = length;
        buffer->watermark = length / 2;

        return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
        struct device *dev, const struct iio_dma_buffer_ops *ops)
{
        iio_buffer_init(&queue->buffer);
        queue->buffer.length = PAGE_SIZE;
        queue->buffer.watermark = queue->buffer.length / 2;
        queue->dev = dev;
        queue->ops = ops;

        INIT_LIST_HEAD(&queue->incoming);
        INIT_LIST_HEAD(&queue->outgoing);

        mutex_init(&queue->lock);
        spin_lock_init(&queue->list_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
        unsigned int i;

        mutex_lock(&queue->lock);

        spin_lock_irq(&queue->list_lock);
        for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                if (!queue->fileio.blocks[i])
                        continue;
                queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
        }
        INIT_LIST_HEAD(&queue->outgoing);
        spin_unlock_irq(&queue->list_lock);

        INIT_LIST_HEAD(&queue->incoming);

        for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                if (!queue->fileio.blocks[i])
                        continue;
                iio_buffer_block_put(queue->fileio.blocks[i]);
                queue->fileio.blocks[i] = NULL;
        }
        queue->fileio.active_block = NULL;
        queue->ops = NULL;

        mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
        mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
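
/*
 * Illustrative sketch (not part of the original file): how a driver might wire
 * the default callbacks exported by this module into its buffer setup. The
 * my_driver_* names and the to_my_driver_buffer() helper are hypothetical, the
 * submit/abort callbacks refer to the earlier sketches, and the field names
 * are assumed to match include/linux/iio/buffer_impl.h for recent kernels. A
 * driver may replace individual entries with custom variants as described in
 * the comment at the top of this file.
 *
 *	static void my_driver_buffer_release(struct iio_buffer *buf)
 *	{
 *		struct my_driver_buffer *b = to_my_driver_buffer(buf);
 *
 *		iio_dma_buffer_release(&b->queue);
 *		kfree(b);
 *	}
 *
 *	static const struct iio_dma_buffer_ops my_driver_dma_buffer_ops = {
 *		.submit = my_driver_submit,
 *		.abort = my_driver_abort,
 *	};
 *
 *	static const struct iio_buffer_access_funcs my_driver_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.release = my_driver_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *		.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 *	};
 */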

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");