linux/include/linux/iio/buffer-dma.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/buffer_impl.h>

struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;

struct iio_buffer_block {
        u32 size;
        u32 bytes_used;
};

/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
 */
enum iio_block_state {
        IIO_BLOCK_STATE_DEQUEUED,
        IIO_BLOCK_STATE_QUEUED,
        IIO_BLOCK_STATE_ACTIVE,
        IIO_BLOCK_STATE_DONE,
        IIO_BLOCK_STATE_DEAD,
};
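
/*
 * Rough lifecycle of a block as handled by the core (simplified): a block
 * starts out DEQUEUED, becomes QUEUED when placed on the incoming queue,
 * ACTIVE once it has been handed to the DMA controller through the submit()
 * callback, and DONE (moved to the outgoing queue) when the driver calls
 * iio_dma_buffer_block_done(). Blocks that can no longer be used, e.g. after
 * a size change, are marked DEAD and freed once their last reference is
 * dropped.
 */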

/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of the block
 * @state: Current state of the block
 */
struct iio_dma_buffer_block {
        /* May only be accessed by the owner of the block */
        struct list_head head;
        size_t bytes_used;

        /*
         * Set during allocation, constant thereafter. May be accessed read-only
         * by anybody holding a reference to the block.
         */
        void *vaddr;
        dma_addr_t phys_addr;
        size_t size;
        struct iio_dma_buffer_queue *queue;

        /* Must not be accessed outside the core. */
        struct kref kref;
        /*
         * Must not be accessed outside the core. Access needs to hold
         * queue->list_lock if the block is not owned by the core.
         */
        enum iio_block_state state;
};

/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 */
struct iio_dma_buffer_queue_fileio {
        struct iio_dma_buffer_block *blocks[2];
        struct iio_dma_buffer_block *active_block;
        size_t pos;
        size_t block_size;
};

/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, @active and the fields in the @fileio
 *   substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context, as well as the blocks on those lists. This covers the
 *   outgoing queue list and typically also a list of active blocks in the
 *   part that handles the DMA controller
 * @incoming: List of blocks on the incoming queue
 * @outgoing: List of blocks on the outgoing queue
 * @active: Whether the buffer is currently active
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
        struct iio_buffer buffer;
        struct device *dev;
        const struct iio_dma_buffer_ops *ops;

        struct mutex lock;
        spinlock_t list_lock;
        struct list_head incoming;
        struct list_head outgoing;

        bool active;

        struct iio_dma_buffer_queue_fileio fileio;
};

/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called to submit a block to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
        int (*submit)(struct iio_dma_buffer_queue *queue,
                struct iio_dma_buffer_block *block);
        void (*abort)(struct iio_dma_buffer_queue *queue);
};
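
/*
 * A minimal sketch of how a driver might implement these callbacks on top
 * of the dmaengine API. The names my_dma_buffer, my_dma_done, my_submit and
 * my_abort are purely illustrative; see
 * drivers/iio/buffer/industrialio-buffer-dmaengine.c for a complete in-tree
 * implementation.
 *
 *      struct my_dma_buffer {
 *              struct iio_dma_buffer_queue queue;
 *              struct dma_chan *chan;
 *      };
 *
 *      static void my_dma_done(void *data)
 *      {
 *              struct iio_dma_buffer_block *block = data;
 *
 *              // Hand the completed block back to the core; it ends up on
 *              // the outgoing queue in IIO_BLOCK_STATE_DONE.
 *              iio_dma_buffer_block_done(block);
 *      }
 *
 *      static int my_submit(struct iio_dma_buffer_queue *queue,
 *              struct iio_dma_buffer_block *block)
 *      {
 *              struct my_dma_buffer *buf =
 *                      container_of(queue, struct my_dma_buffer, queue);
 *              struct dma_async_tx_descriptor *desc;
 *
 *              // Capture the whole block; a real driver may have to clamp
 *              // or align this to the controller's limits.
 *              block->bytes_used = block->size;
 *              desc = dmaengine_prep_slave_single(buf->chan, block->phys_addr,
 *                      block->bytes_used, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *              if (!desc)
 *                      return -ENOMEM;
 *
 *              desc->callback = my_dma_done;
 *              desc->callback_param = block;
 *              dmaengine_submit(desc);
 *              dma_async_issue_pending(buf->chan);
 *
 *              return 0;
 *      }
 *
 *      static void my_abort(struct iio_dma_buffer_queue *queue)
 *      {
 *              struct my_dma_buffer *buf =
 *                      container_of(queue, struct my_dma_buffer, queue);
 *
 *              dmaengine_terminate_sync(buf->chan);
 *      }
 *
 *      static const struct iio_dma_buffer_ops my_dma_buffer_ops = {
 *              .submit = my_submit,
 *              .abort = my_abort,
 *      };
 */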

void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
        struct list_head *list);

int iio_dma_buffer_enable(struct iio_buffer *buffer,
        struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
        struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
        char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);

int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
        struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
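
/*
 * A rough usage sketch: a driver embeds struct iio_dma_buffer_queue in its
 * own buffer structure, plugs the iio_dma_buffer_* helpers into its
 * struct iio_buffer_access_funcs, brackets the queue's lifetime with
 * iio_dma_buffer_init() and iio_dma_buffer_exit(), and drops the final
 * reference via iio_dma_buffer_release() from the buffer's release callback.
 * my_dma_buffer, my_dma_buffer_ops, my_buffer_release and my_access_funcs
 * are illustrative names, and the access_funcs fields used here assume a
 * kernel where those callbacks match the helpers declared above.
 *
 *      static void my_buffer_release(struct iio_buffer *buffer)
 *      {
 *              struct iio_dma_buffer_queue *queue =
 *                      container_of(buffer, struct iio_dma_buffer_queue, buffer);
 *
 *              iio_dma_buffer_release(queue);
 *              kfree(container_of(queue, struct my_dma_buffer, queue));
 *      }
 *
 *      static const struct iio_buffer_access_funcs my_access_funcs = {
 *              .read = iio_dma_buffer_read,
 *              .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *              .set_length = iio_dma_buffer_set_length,
 *              .request_update = iio_dma_buffer_request_update,
 *              .enable = iio_dma_buffer_enable,
 *              .disable = iio_dma_buffer_disable,
 *              .data_available = iio_dma_buffer_data_available,
 *              .release = my_buffer_release,
 *              .modes = INDIO_BUFFER_HARDWARE,
 *      };
 *
 *      // Probe path (error handling omitted):
 *      buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *      iio_dma_buffer_init(&buf->queue, dma_dev, &my_dma_buffer_ops);
 *      buf->queue.buffer.access = &my_access_funcs;
 *
 *      // Teardown path, before the last reference to the buffer is dropped:
 *      iio_dma_buffer_exit(&buf->queue);
 */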

#endif