// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"

static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
	struct cec_fh *fh = filp->private_data;

	return &fh->adap->devnode;
}

/* CEC file operations */

static __poll_t cec_poll(struct file *filp,
			 struct poll_table_struct *poll)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	__poll_t res = 0;

	poll_wait(filp, &fh->wait, poll);
	if (!cec_is_registered(adap))
		return EPOLLERR | EPOLLHUP;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= EPOLLOUT | EPOLLWRNORM;
	if (fh->queued_msgs)
		res |= EPOLLIN | EPOLLRDNORM;
	if (fh->total_queued_events)
		res |= EPOLLPRI;
	mutex_unlock(&adap->lock);
	return res;
}

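/*
 * A filehandle is "busy" (i.e. it may not use the adapter right now) unless
 * it is itself the exclusive initiator or follower; otherwise it is busy if
 * another filehandle claimed the exclusive initiator role or if this
 * filehandle was put in NO_INITIATOR mode.
 */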
static bool cec_is_busy(const struct cec_adapter *adap,
			const struct cec_fh *fh)
{
	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

	/*
	 * Exclusive initiators and followers can always access the CEC adapter
	 */
	if (valid_initiator || valid_follower)
		return false;

	/*
	 * All others can only access the CEC adapter if there is no
	 * exclusive initiator and they are in INITIATOR mode.
	 */
	return adap->cec_initiator ||
	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

static long cec_adap_g_caps(struct cec_adapter *adap,
			    struct cec_caps __user *parg)
{
	struct cec_caps caps = {};

	strscpy(caps.driver, adap->devnode.dev.parent->driver->name,
		sizeof(caps.driver));
	strscpy(caps.name, adap->name, sizeof(caps.name));
	caps.available_log_addrs = adap->available_log_addrs;
	caps.capabilities = adap->capabilities;
	caps.version = LINUX_VERSION_CODE;
	if (copy_to_user(parg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}

static long cec_adap_g_phys_addr(struct cec_adapter *adap,
				 __u16 __user *parg)
{
	u16 phys_addr;

	mutex_lock(&adap->lock);
	phys_addr = adap->phys_addr;
	mutex_unlock(&adap->lock);
	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
		return -EFAULT;
	return 0;
}

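/*
 * Check that a physical address is well-formed: if a nibble is 0, then all
 * less significant nibbles must be 0 as well (1.2.0.0 is valid, 1.0.2.0 is
 * not). CEC_PHYS_ADDR_INVALID (f.f.f.f) is always accepted.
 */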
static int cec_validate_phys_addr(u16 phys_addr)
{
	int i;

	if (phys_addr == CEC_PHYS_ADDR_INVALID)
		return 0;
	for (i = 0; i < 16; i += 4)
		if (phys_addr & (0xf << i))
			break;
	if (i == 16)
		return 0;
	for (i += 4; i < 16; i += 4)
		if ((phys_addr & (0xf << i)) == 0)
			return -EINVAL;
	return 0;
}

static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	err = cec_validate_phys_addr(phys_addr);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}

static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	log_addrs = adap->log_addrs;
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

static long cec_adap_g_connector_info(struct cec_adapter *adap,
				      struct cec_log_addrs __user *parg)
{
	int ret = 0;

	if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
		return -ENOTTY;

	mutex_lock(&adap->lock);
	if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
		ret = -EFAULT;
	mutex_unlock(&adap->lock);
	return ret;
}

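/*
 * Handle CEC_TRANSMIT: verify that logical addresses have been configured
 * and that this filehandle may use the adapter, then pass the message on to
 * cec_transmit_msg_fh().
 */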
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	mutex_lock(&adap->lock);
	if (adap->log_addrs.num_log_addrs == 0)
		err = -EPERM;
	else if (adap->is_configuring)
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}

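/*
 * Handle CEC_RECEIVE: dequeue the next received message for this filehandle,
 * waiting for one to arrive if the device was opened in blocking mode.
 */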
static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err;

	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	err = cec_receive_msg(fh, &msg, block);
	if (err)
		return err;
	msg.flags = 0;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

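/*
 * Handle CEC_DQEVENT: return the oldest pending event across all per-type
 * event queues of this filehandle, optionally blocking until one arrives.
 */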
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->total_queued_events && block) {
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event of all queued events */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);

		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}

	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);

	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;

unlock:
	mutex_unlock(&fh->lock);
	return err;
}

static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode = fh->mode_initiator | fh->mode_follower;

	if (copy_to_user(parg, &mode, sizeof(mode)))
		return -EFAULT;
	return 0;
}

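/*
 * Handle CEC_S_MODE: change the initiator and/or follower mode of this
 * filehandle. Monitor modes require CAP_NET_ADMIN, and claiming an exclusive
 * role fails with EBUSY if another filehandle already holds it.
 */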
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	bool send_pin_event = false;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}

	/* Monitor modes require NO_INITIATOR, otherwise they are invalid */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (!err) {
		bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
		bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;

		if (old_mon_pin != new_mon_pin) {
			send_pin_event = new_mon_pin;
			if (new_mon_pin)
				err = cec_monitor_pin_cnt_inc(adap);
			else
				cec_monitor_pin_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (send_pin_event) {
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
	}
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}

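/* ioctl dispatcher for the CEC character device */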
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	if (!cec_is_registered(adap))
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_ADAP_G_CONNECTOR_INFO:
		return cec_adap_g_connector_info(adap, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}

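/*
 * Open the CEC device: allocate and initialize a new filehandle, enable the
 * adapter if this is the first open, the hardware does not require an HPD
 * and no physical address has been set, then queue the initial state events
 * for this filehandle.
 */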
static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	unsigned int i;
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	for (i = 0; i < CEC_NUM_EVENTS; i++)
		INIT_LIST_HEAD(&fh->events[i]);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	mutex_lock(&devnode->lock);
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		err = adap->ops->adap_enable(adap, true);
		if (err) {
			mutex_unlock(&devnode->lock);
			kfree(fh);
			return err;
		}
	}
	filp->private_data = fh;

	/* Queue up initial state events */
	ev.state_change.phys_addr = adap->phys_addr;
	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	ev.state_change.have_conn_info =
		adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
	cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
	if (adap->pin && adap->pin->ops->read_hpd) {
		err = adap->pin->ops->read_hpd(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
					 CEC_EVENT_PIN_HPD_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
	if (adap->pin && adap->pin->ops->read_5v) {
		err = adap->pin->ops->read_5v(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
					 CEC_EVENT_PIN_5V_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
#endif

	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		cec_monitor_pin_cnt_dec(adap);
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
	    !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}

const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.compat_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};