// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/consumer.h>

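/**
 * struct iio_cb_buffer - IIO buffer that feeds stored data to a callback
 * @buffer:	Embedded IIO buffer; its access functions route each datum
 *		to @cb rather than to a userspace-visible FIFO.
 * @cb:		Consumer-supplied callback, run for every datum stored.
 * @private:	Opaque pointer passed back to @cb.
 * @channels:	Channel array obtained from iio_channel_get_all().
 * @indio_dev:	IIO device common to all channels in @channels.
 */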
struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(const void *data, void *private);
	void *private;
	struct iio_channel *channels;
	struct iio_dev *indio_dev;
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}

static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	return cb_buff->cb(data, cb_buff->private);
}

static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	bitmap_free(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,

	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};

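/**
 * iio_channel_get_all_cb() - allocate a callback buffer for a consumer device
 * @dev:	Consumer device whose channel mappings are looked up.
 * @cb:		Callback run in the data path for every datum stored.
 * @private:	Private data passed through to @cb.
 *
 * All channels mapped to @dev must share a single IIO device; a mix of
 * providers results in -EINVAL.
 *
 * A minimal usage sketch (demux_cb and st are illustrative names, not part
 * of this API):
 *
 *	cb_buff = iio_channel_get_all_cb(dev, demux_cb, st);
 *	if (IS_ERR(cb_buff))
 *		return PTR_ERR(cb_buff);
 *	iio_channel_start_all_cb(cb_buff);
 *	...
 *	iio_channel_stop_all_cb(cb_buff);
 *	iio_channel_release_all_cb(cb_buff);
 */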
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL)
		return ERR_PTR(-ENOMEM);

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength,
						  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}

	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != cb_buff->indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	bitmap_free(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

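/**
 * iio_channel_cb_set_buffer_watermark() - set the buffer watermark
 * @cb_buff:	Callback buffer allocated by iio_channel_get_all_cb().
 * @watermark:	New watermark; must be non-zero.
 *
 * Returns 0 on success, or -EINVAL if @watermark is zero.
 */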
int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
					size_t watermark)
{
	if (!watermark)
		return -EINVAL;

	cb_buff->buffer.watermark = watermark;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);

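/**
 * iio_channel_start_all_cb() - start the flow of data through the callback
 * @cb_buff:	Callback buffer to attach to its IIO device.
 *
 * Attaches the buffer via iio_update_buffers() and returns its result.
 */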
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
				  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

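/**
 * iio_channel_stop_all_cb() - stop the flow of data through the callback
 * @cb_buff:	Callback buffer to detach from its IIO device.
 */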
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->indio_dev, NULL, &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

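/**
 * iio_channel_release_all_cb() - release the channels and the buffer
 * @cb_buff:	Callback buffer to release.
 *
 * Drops this user's buffer reference; once the last reference is gone,
 * @cb_buff is freed through iio_buffer_cb_release().
 */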
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

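/**
 * iio_channel_cb_get_channels() - get the channels behind a callback buffer
 * @cb_buffer:	Callback buffer to query.
 *
 * Returns the channel array acquired by iio_channel_get_all_cb().
 */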
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);

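/**
 * iio_channel_cb_get_iio_dev() - get the IIO device behind a callback buffer
 * @cb_buffer:	Callback buffer to query.
 */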
struct iio_dev
*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->indio_dev;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_iio_dev);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O callback buffer");
MODULE_LICENSE("GPL");