// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample.
 *
 * Blocks are exchanged between the DMA controller and the application via two
 * queues: blocks on the incoming queue are waiting for the DMA controller to
 * pick them up and fill them with data, blocks on the outgoing queue have been
 * filled with data and are waiting for the application to dequeue and read
 * them.
 *
 * A block can be in one of the following states:
 *  * Owned by the application (DEQUEUED): the application may read data from
 *    the block.
 *  * On the incoming list (QUEUED): the block is queued up to be processed by
 *    the DMA controller.
 *  * Owned by the DMA controller (ACTIVE): the DMA controller is filling the
 *    block with data.
 *  * On the outgoing list (DONE): the block has been filled with data and can
 *    be dequeued by the application.
 *  * Dead (DEAD): the block has been marked to be freed. It may still be owned
 *    by the application or the DMA controller, but once the current owner is
 *    done with it the block is freed instead of being put back on the incoming
 *    or outgoing list.
 *
 * Blocks are reference counted and the memory associated with both the block
 * structure and the storage for the block is freed when the last reference is
 * dropped, so a block must never be accessed without holding a reference.
 *
 * A driver for a DMA capable converter implements the submit() callback of the
 * iio_dma_buffer_ops structure, which starts the DMA transfer for a block.
 * Once the transfer has completed the driver sets bytes_used (a multiple of
 * the bytes per datum) and calls iio_dma_buffer_block_done(). Blocks that will
 * never complete, e.g. because the DMA controller has been stopped, are handed
 * back via iio_dma_buffer_block_list_abort().
 */

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
		block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent() can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from an atomic context.
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * If the block has been marked as dead the buffer is being torn down,
	 * so don't put it on the outgoing list; the final reference drop will
	 * free it.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

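	/*
	 * Drop the reference that was taken in iio_dma_buffer_submit_block()
	 * and wake up anybody waiting in poll(), since new data is available.
	 */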
	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback in the iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it, free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not re-use it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed (queue->ops is cleared in
	 * iio_dma_buffer_exit()) the block is left in limbo. It will neither be
	 * on the incoming nor the outgoing list and will never complete; it
	 * just waits to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
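	/*
	 * Hold an extra reference while the DMA controller owns the block; it
	 * is dropped again in iio_dma_buffer_block_done() or
	 * iio_dma_buffer_block_list_abort().
	 */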
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * There is not much we can do here other than drop the extra
		 * reference; recovery requires the buffer to be disabled and
		 * re-enabled. This should not happen unless we run out of
		 * memory or something similar.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer belongs to is enabled. It
 * will start all pending DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
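	/* Start DMA on all blocks that were queued up while the buffer was disabled. */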
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer belongs to is disabled.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback in the iio_buffer_access_ops struct for
 * DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

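	/* Copy only whole samples and no more than what is left in the active block. */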
	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback in the iio_buffer_access_ops
 * struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we use the size of the block, not
	 * the number of bytes actually available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */
	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback in the
 * iio_buffer_access_ops struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length() - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback in the iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
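	/* Default the watermark to half the buffer, matching the double-buffered fileio scheme. */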
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device is used by the queue for DMA memory allocations, so it should
 * refer to the device that will perform the DMA to ensure that allocations are
 * done from a memory region that the device can access.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

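	/*
	 * Mark all blocks as dead; blocks still held by the DMA controller or
	 * by userspace are freed once their last reference is dropped.
	 */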
	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");