/*
 * cdev.c - Application interfacing module for character devices
 *
 * This file is part of the MOST Linux driver suite.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include "mostcore.h"

#define CHRDEV_REGION_SIZE 50

static dev_t aim_devno;
static struct class *aim_class;
static struct ida minor_id;
static unsigned int major;
static struct most_aim cdev_aim;

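/*
 * struct aim_channel - per-channel state that ties a MOST channel to the
 * character device exposed to user space
 */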
struct aim_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct aim_channel, cdev)

static struct list_head channel_list;
static spinlock_t ch_list_lock;

static inline bool ch_has_mbo(struct aim_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
}

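/*
 * ch_get_mbo - peek the mbo at the fifo head or, failing that, fetch a
 * fresh one from the core and queue it for subsequent writes
 */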
static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

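/* look up the channel object registered for this interface/channel pair */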
static struct aim_channel *get_channel(struct most_interface *iface, int id)
{
	struct aim_channel *c, *tmp;
	unsigned long flags;
	int found_channel = 0;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			found_channel = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	if (!found_channel)
		return NULL;
	return c;
}

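/* return all fifo-held buffers to the core and stop the channel */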
static void stop_channel(struct aim_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out(&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &cdev_aim);
}

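/* tear down the device node and drop the channel from the global list */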
static void destroy_cdev(struct aim_channel *c)
{
	unsigned long flags;

	device_destroy(aim_class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

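/* release the minor number, the fifo memory and the channel object itself */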
static void destroy_channel(struct aim_channel *c)
{
	ida_simple_remove(&minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}
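/**
 * aim_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel.
 * Returns 0 on success or error code otherwise.
 */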
static int aim_open(struct inode *inode, struct file *filp)
{
	struct aim_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		pr_info("WARN: Access flags mismatch\n");
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		pr_info("WARN: Device is destroyed\n");
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		pr_info("WARN: Device is busy\n");
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}
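/**
 * aim_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel and, if the core has already disconnected the
 * device, frees the channel's resources.
 * Returns 0 on success.
 */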
static int aim_close(struct inode *inode, struct file *filp)
{
	struct aim_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
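/**
 * aim_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user space buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 *
 * Returns the number of bytes accepted or an error code.
 */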
static ssize_t aim_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct aim_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}
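/**
 * aim_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user space buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 *
 * Returns the number of bytes copied to user space or an error code.
 */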
static ssize_t
aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo;
	struct aim_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     !kfifo_is_empty(&c->fifo) ||
					     !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t, count, mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf, mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}
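/**
 * aim_poll - implements the syscall to poll the device
 * @filp: file pointer
 * @wait: poll table
 *
 * Reports readability when received data sits in the fifo and writability
 * when a transmit buffer is available.
 */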
static unsigned int aim_poll(struct file *filp, poll_table *wait)
{
	struct aim_channel *c = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &c->wq, wait);

	if (c->cfg->direction == MOST_CH_RX) {
		if (!kfifo_is_empty(&c->fifo))
			mask |= POLLIN | POLLRDNORM;
	} else {
		if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}
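/*
 * Initialization of struct file_operations
 */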
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = aim_read,
	.write = aim_write,
	.open = aim_open,
	.release = aim_close,
	.poll = aim_poll,
};
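/**
 * aim_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space. If the channel is still held open, final teardown
 * is deferred to the close of the file.
 */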
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct aim_channel *c;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
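/**
 * aim_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO, stores the MBO in the
 * channel's fifo and wakes any reader waiting for data.
 */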
static int aim_rx_completion(struct mbo *mbo)
{
	struct aim_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -ENXIO;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		pr_info("WARN: Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}
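/**
 * aim_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes processes sleeping in the channel's wait queue so a blocked
 * write can fetch the buffer that has just been returned.
 */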
static int aim_tx_completion(struct most_interface *iface, int channel_id)
{
	struct aim_channel *c;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}
	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		pr_info("Channel ID out of range\n");
		return -EINVAL;
	}

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;
	wake_up_interruptible(&c->wq);
	return 0;
}
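/**
 * aim_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to the channel's configuration
 * @parent: pointer to kobject (needed for sysfs hook-up)
 * @name: name of the device to be created
 *
 * This allocates a channel object and creates the device node in /dev.
 *
 * Returns 0 on success or error code otherwise.
 */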
static int aim_probe(struct most_interface *iface, int channel_id,
		     struct most_channel_config *cfg,
		     struct kobject *parent, char *name)
{
	struct aim_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if ((!iface) || (!cfg) || (!parent) || (!name)) {
		pr_info("Probing AIM with bad arguments\n");
		return -EINVAL;
	}
	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto error_alloc_channel;
	}

	c->devno = MKDEV(major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0) {
		pr_info("failed to add cdev\n");
		goto error_free_channel;
	}
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval) {
		pr_info("failed to alloc channel kfifo\n");
		goto error_alloc_kfifo;
	}
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(aim_class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		pr_info("failed to create new device node %s\n", name);
		goto error_create_device;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

error_create_device:
	kfifo_free(&c->fifo);
	list_del(&c->list);
error_alloc_kfifo:
	cdev_del(&c->cdev);
error_free_channel:
	kfree(c);
error_alloc_channel:
	ida_simple_remove(&minor_id, current_minor);
	return retval;
}

static struct most_aim cdev_aim = {
	.name = "cdev",
	.probe_channel = aim_probe,
	.disconnect_channel = aim_disconnect_channel,
	.rx_completion = aim_rx_completion,
	.tx_completion = aim_tx_completion,
};

static int __init mod_init(void)
{
	int err;

	pr_info("init()\n");

	INIT_LIST_HEAD(&channel_list);
	spin_lock_init(&ch_list_lock);
	ida_init(&minor_id);

	err = alloc_chrdev_region(&aim_devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	major = MAJOR(aim_devno);

	aim_class = class_create(THIS_MODULE, "most_cdev_aim");
	if (IS_ERR(aim_class)) {
		pr_err("no udev support\n");
		err = PTR_ERR(aim_class);
		goto free_cdev;
	}
	err = most_register_aim(&cdev_aim);
	if (err)
		goto dest_class;
	return 0;

dest_class:
	class_destroy(aim_class);
free_cdev:
	unregister_chrdev_region(aim_devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&minor_id);
	return err;
}

static void __exit mod_exit(void)
{
	struct aim_channel *c, *tmp;

	pr_info("exit module\n");

	most_deregister_aim(&cdev_aim);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	class_destroy(aim_class);
	unregister_chrdev_region(aim_devno, CHRDEV_REGION_SIZE);
	ida_destroy(&minor_id);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device AIM for mostcore");