1
2
3
4
5
6
7
8
9
10
11
12#include <linux/interrupt.h>
13#include <linux/fs.h>
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/spi/spi.h>
17#include <linux/sysfs.h>
18#include <linux/sched.h>
19#include <linux/poll.h>
20
21#include <linux/iio/iio.h>
22#include <linux/iio/sysfs.h>
23#include <linux/iio/buffer.h>
24#include "../ring_hw.h"
25#include "sca3000.h"
26
27
28
29
30
31
32
33
34
35
36static int sca3000_read_data(struct sca3000_state *st,
37 uint8_t reg_address_high,
38 u8 **rx_p,
39 int len)
40{
41 int ret;
42 struct spi_transfer xfer[2] = {
43 {
44 .len = 1,
45 .tx_buf = st->tx,
46 }, {
47 .len = len,
48 }
49 };
50 *rx_p = kmalloc(len, GFP_KERNEL);
51 if (*rx_p == NULL) {
52 ret = -ENOMEM;
53 goto error_ret;
54 }
55 xfer[1].rx_buf = *rx_p;
56 st->tx[0] = SCA3000_READ_REG(reg_address_high);
57 ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
58 if (ret) {
59 dev_err(get_device(&st->us->dev), "problem reading register");
60 goto error_free_rx;
61 }
62
63 return 0;
64error_free_rx:
65 kfree(*rx_p);
66error_ret:
67 return ret;
68}
69
70
71
72
73
74
75
76
77
78
79
80static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
81 size_t count, char __user *buf)
82{
83 struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
84 struct iio_dev *indio_dev = hw_ring->private;
85 struct sca3000_state *st = iio_priv(indio_dev);
86 u8 *rx;
87 int ret, i, num_available, num_read = 0;
88 int bytes_per_sample = 1;
89
90 if (st->bpse == 11)
91 bytes_per_sample = 2;
92
93 mutex_lock(&st->lock);
94 if (count % bytes_per_sample) {
95 ret = -EINVAL;
96 goto error_ret;
97 }
98
99 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
100 if (ret)
101 goto error_ret;
102 else
103 num_available = st->rx[0];
104
105
106
107
108 if (count > num_available * bytes_per_sample)
109 num_read = num_available*bytes_per_sample;
110 else
111 num_read = count;
112
113 ret = sca3000_read_data(st,
114 SCA3000_REG_ADDR_RING_OUT,
115 &rx, num_read);
116 if (ret)
117 goto error_ret;
118
119 for (i = 0; i < num_read; i++)
120 *(((u16 *)rx) + i) = be16_to_cpup((u16 *)rx + i);
121
122 if (copy_to_user(buf, rx, num_read))
123 ret = -EFAULT;
124 kfree(rx);
125 r->stufftoread = 0;
126error_ret:
127 mutex_unlock(&st->lock);
128
129 return ret ? ret : num_read;
130}
131
132
/*
 * The hardware ring is fixed-size, so the reported length is a constant;
 * the buffer argument is unused.
 */
static int sca3000_ring_get_length(struct iio_buffer *r)
{
	static const int sca3000_hw_ring_length = 64;

	return sca3000_hw_ring_length;
}
137
138
/*
 * Fixed scan size: the device always produces a full 6-byte set per datum;
 * the buffer argument is unused.
 */
static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
{
	static const int sca3000_bytes_per_datum = 6;

	return sca3000_bytes_per_datum;
}
143
/* Standard IIO buffer 'enable' and 'length' sysfs attributes. */
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
146
147
148
149
150static ssize_t sca3000_query_ring_int(struct device *dev,
151 struct device_attribute *attr,
152 char *buf)
153{
154 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
155 int ret, val;
156 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
157 struct sca3000_state *st = iio_priv(indio_dev);
158
159 mutex_lock(&st->lock);
160 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
161 val = st->rx[0];
162 mutex_unlock(&st->lock);
163 if (ret)
164 return ret;
165
166 return sprintf(buf, "%d\n", !!(val & this_attr->address));
167}
168
169
170
171
172static ssize_t sca3000_set_ring_int(struct device *dev,
173 struct device_attribute *attr,
174 const char *buf,
175 size_t len)
176{
177 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
178 struct sca3000_state *st = iio_priv(indio_dev);
179 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
180 long val;
181 int ret;
182
183 mutex_lock(&st->lock);
184 ret = strict_strtol(buf, 10, &val);
185 if (ret)
186 goto error_ret;
187 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
188 if (ret)
189 goto error_ret;
190 if (val)
191 ret = sca3000_write_reg(st,
192 SCA3000_REG_ADDR_INT_MASK,
193 st->rx[0] | this_attr->address);
194 else
195 ret = sca3000_write_reg(st,
196 SCA3000_REG_ADDR_INT_MASK,
197 st->rx[0] & ~this_attr->address);
198error_ret:
199 mutex_unlock(&st->lock);
200
201 return ret ? ret : len;
202}
203
/* Sysfs controls for the ring half-full and three-quarters-full interrupts. */
static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_HALF);

static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_THREE_QUARTER);
213
214static ssize_t sca3000_show_buffer_scale(struct device *dev,
215 struct device_attribute *attr,
216 char *buf)
217{
218 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
219 struct sca3000_state *st = iio_priv(indio_dev);
220
221 return sprintf(buf, "0.%06d\n", 4*st->info->scale);
222}
223
/* Read-only buffer-mode acceleration scale. */
static IIO_DEVICE_ATTR(in_accel_scale,
		       S_IRUGO,
		       sca3000_show_buffer_scale,
		       NULL,
		       0);
229
230
231
232
233
234
235
/* All attributes exposed under the buffer directory (see group below). */
static struct attribute *sca3000_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&iio_dev_attr_50_percent.dev_attr.attr,
	&iio_dev_attr_75_percent.dev_attr.attr,
	&iio_dev_attr_in_accel_scale.dev_attr.attr,
	NULL,
};
244
/* Attribute group attached to the iio_buffer; appears as "buffer" in sysfs. */
static struct attribute_group sca3000_ring_attr = {
	.attrs = sca3000_ring_attributes,
	.name = "buffer",
};
249
250static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
251{
252 struct iio_buffer *buf;
253 struct iio_hw_buffer *ring;
254
255 ring = kzalloc(sizeof *ring, GFP_KERNEL);
256 if (!ring)
257 return NULL;
258
259 ring->private = indio_dev;
260 buf = &ring->buf;
261 buf->stufftoread = 0;
262 buf->attrs = &sca3000_ring_attr;
263 iio_buffer_init(buf);
264
265 return buf;
266}
267
/* Release a buffer previously obtained from sca3000_rb_allocate(). */
static inline void sca3000_rb_free(struct iio_buffer *r)
{
	struct iio_hw_buffer *hw_buf = iio_to_hw_buf(r);

	kfree(hw_buf);
}
272
/* Buffer access callbacks; this driver only supports hardware-ring reads. */
static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
	.read_first_n = &sca3000_read_first_n_hw_rb,
	.get_length = &sca3000_ring_get_length,
	.get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum,
};
278
279int sca3000_configure_ring(struct iio_dev *indio_dev)
280{
281 indio_dev->buffer = sca3000_rb_allocate(indio_dev);
282 if (indio_dev->buffer == NULL)
283 return -ENOMEM;
284 indio_dev->modes |= INDIO_BUFFER_HARDWARE;
285
286 indio_dev->buffer->access = &sca3000_ring_access_funcs;
287
288 return 0;
289}
290
/* Counterpart to sca3000_configure_ring(): free the attached buffer. */
void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
	sca3000_rb_free(indio_dev->buffer);
}
295
296static inline
297int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
298{
299 struct sca3000_state *st = iio_priv(indio_dev);
300 int ret;
301
302 mutex_lock(&st->lock);
303 ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
304 if (ret)
305 goto error_ret;
306 if (state) {
307 printk(KERN_INFO "supposedly enabling ring buffer\n");
308 ret = sca3000_write_reg(st,
309 SCA3000_REG_ADDR_MODE,
310 (st->rx[0] | SCA3000_RING_BUF_ENABLE));
311 } else
312 ret = sca3000_write_reg(st,
313 SCA3000_REG_ADDR_MODE,
314 (st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
315error_ret:
316 mutex_unlock(&st->lock);
317
318 return ret;
319}
320
321
322
323
324
325
326
327static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
328{
329 return __sca3000_hw_ring_state_set(indio_dev, 1);
330}
331
332static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
333{
334 return __sca3000_hw_ring_state_set(indio_dev, 0);
335}
336
/* Buffer enable/disable hooks registered with the IIO core. */
static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
	.preenable = &sca3000_hw_ring_preenable,
	.postdisable = &sca3000_hw_ring_postdisable,
};
341
/* Hook the sca3000 buffer setup callbacks onto the iio device. */
void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
	indio_dev->setup_ops = &sca3000_ring_setup_ops;
}
346
347
348
349
350
351
352
353void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
354{
355 if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
356 SCA3000_INT_STATUS_HALF)) {
357 ring->stufftoread = true;
358 wake_up_interruptible(&ring->pollq);
359 }
360}
361