linux/drivers/iio/buffer/kfifo_buf.c
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/sched.h>
#include <linux/poll.h>

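/*
 * A kfifo-backed IIO buffer. The embedded struct iio_buffer is what the IIO
 * core operates on; the kfifo holds the queued sample records; user_lock
 * serializes userspace reads against reallocation of the fifo; and
 * update_needed records that length or bytes_per_datum changed, so the fifo
 * must be reallocated on the next request_update.
 */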
struct iio_kfifo {
	struct iio_buffer buffer;
	struct kfifo kf;
	struct mutex user_lock;
	int update_needed;
};

#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)

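/*
 * The record size (bytes_per_datum) is only known at run time, so the fifo
 * is sized through the raw __kfifo_alloc() helper rather than the type-based
 * kfifo_alloc() macro: 'length' records of 'bytes_per_datum' bytes each.
 */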
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
				int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
			     bytes_per_datum, GFP_KERNEL);
}

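/*
 * Called by the IIO core before the buffer is enabled. If length or
 * bytes_per_datum changed since the last allocation, the fifo is freed and
 * reallocated; otherwise any stale data is simply discarded.
 */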
static int iio_request_update_kfifo(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_kfifo *buf = iio_to_kfifo(r);

	mutex_lock(&buf->user_lock);
	if (buf->update_needed) {
		kfifo_free(&buf->kf);
		ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
				   buf->buffer.length);
		if (ret >= 0)
			buf->update_needed = false;
	} else {
		kfifo_reset_out(&buf->kf);
	}
	mutex_unlock(&buf->user_lock);

	return ret;
}

static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
	kf->update_needed = true;
	return 0;
}

static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		iio_mark_update_needed_kfifo(r);
	}
	return 0;
}

static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	if (r->length != length) {
		r->length = length;
		iio_mark_update_needed_kfifo(r);
	}
	return 0;
}

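/*
 * Called from the IIO core (e.g. via iio_push_to_buffers()) to queue one
 * sample record of bytes_per_datum bytes. Returns -EBUSY if the fifo is full
 * and the record had to be dropped.
 */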
static int iio_store_to_kfifo(struct iio_buffer *r,
			      const void *data)
{
	int ret;
	struct iio_kfifo *kf = iio_to_kfifo(r);
	ret = kfifo_in(&kf->kf, data, 1);
	if (ret != 1)
		return -EBUSY;
	return 0;
}

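/*
 * Backs read() on the buffer's character device: copies at most n bytes of
 * queued sample data to userspace. Reads shorter than one record are
 * rejected with -EINVAL; on success the number of bytes copied is returned.
 */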
static int iio_read_first_n_kfifo(struct iio_buffer *r,
			   size_t n, char __user *buf)
{
	int ret, copied;
	struct iio_kfifo *kf = iio_to_kfifo(r);

	if (mutex_lock_interruptible(&kf->user_lock))
		return -ERESTARTSYS;

	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
		ret = -EINVAL;
	else
		ret = kfifo_to_user(&kf->kf, buf, n, &copied);
	mutex_unlock(&kf->user_lock);
	if (ret < 0)
		return ret;

	return copied;
}

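/*
 * Reports the number of sample records currently queued; the IIO core uses
 * this to decide when to wake up readers polling the buffer.
 */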
static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
	size_t samples;

	mutex_lock(&kf->user_lock);
	samples = kfifo_len(&kf->kf);
	mutex_unlock(&kf->user_lock);

	return samples;
}

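/*
 * Invoked once the last reference to the buffer is dropped (see
 * iio_buffer_put()); tears down the lock and frees both the fifo storage and
 * the wrapper structure.
 */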
static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
	struct iio_kfifo *kf = iio_to_kfifo(buffer);

	mutex_destroy(&kf->user_lock);
	kfifo_free(&kf->kf);
	kfree(kf);
}

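/*
 * Ops table through which the IIO buffer core drives this implementation;
 * the contract for each callback is documented with
 * struct iio_buffer_access_funcs.
 */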
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
	.store_to = &iio_store_to_kfifo,
	.read_first_n = &iio_read_first_n_kfifo,
	.data_available = iio_kfifo_buf_data_available,
	.request_update = &iio_request_update_kfifo,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
	.set_length = &iio_set_length_kfifo,
	.release = &iio_kfifo_buffer_release,

	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};

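/**
 * iio_kfifo_allocate - Allocate a new kfifo-backed IIO buffer
 *
 * RETURNS:
 * Pointer to the allocated iio_buffer on success, NULL on failure. Release
 * the buffer with iio_kfifo_free().
 */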
struct iio_buffer *iio_kfifo_allocate(void)
{
	struct iio_kfifo *kf;

	kf = kzalloc(sizeof(*kf), GFP_KERNEL);
	if (!kf)
		return NULL;

	kf->update_needed = true;
	iio_buffer_init(&kf->buffer);
	kf->buffer.access = &kfifo_access_funcs;
	kf->buffer.length = 2;
	mutex_init(&kf->user_lock);

	return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);

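/*
 * Drops the allocator's reference; the memory is only released via the
 * ->release() callback once all remaining users have dropped theirs.
 */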
void iio_kfifo_free(struct iio_buffer *r)
{
	iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);

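/*
 * devres callbacks: devm_iio_kfifo_release() frees the buffer when the
 * owning device is unbound, and devm_iio_kfifo_match() identifies the devres
 * entry to drop in devm_iio_kfifo_free().
 */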
static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	iio_kfifo_free(*(struct iio_buffer **)res);
}

static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
{
	struct iio_buffer **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
 * @dev:		Device to allocate kfifo buffer for
 *
 * RETURNS:
 * Pointer to allocated iio_buffer on success, NULL on failure.
 */
struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
{
	struct iio_buffer **ptr, *r;

	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	r = iio_kfifo_allocate();
	if (r) {
		*ptr = r;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return r;
}
EXPORT_SYMBOL(devm_iio_kfifo_allocate);
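
/*
 * Minimal usage sketch for a driver probe path (illustrative only: the
 * indio_dev and parent names are assumed to exist in the caller, and drivers
 * typically also set up a trigger handler or other producer that fills the
 * fifo via iio_push_to_buffers()):
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = devm_iio_kfifo_allocate(parent);
 *	if (!buffer)
 *		return -ENOMEM;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 */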

/**
 * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
 * @dev:		Device the buffer belongs to
 * @r:			The buffer associated with the device
 */
void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
{
	WARN_ON(devres_release(dev, devm_iio_kfifo_release,
			       devm_iio_kfifo_match, r));
}
EXPORT_SYMBOL(devm_iio_kfifo_free);

MODULE_LICENSE("GPL");