/*
 * cec-adap.c - HDMI Consumer Electronics Control framework - main file
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

#include <drm/drm_edid.h>

#include "cec-priv.h"
22
23static void cec_fill_msg_report_features(struct cec_adapter *adap,
24 struct cec_msg *msg,
25 unsigned int la_idx);

/*
 * 400 ms is the time it takes for one 16 byte message to be
 * transferred and 5 is the maximum number of retries. Add
 * another 100 ms as a margin, so if the transmit doesn't
 * finish before that time something is really wrong and we
 * have to time out.
 */
37#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
38
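/*
 * Invoke an optional callback from struct cec_adap_ops: call_op() returns
 * the callback's result, or 0 if the callback is not implemented, while
 * call_void_op() simply does nothing if the callback is missing.
 */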
39#define call_op(adap, op, arg...) \
40 (adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
41
42#define call_void_op(adap, op, arg...) \
43 do { \
44 if (adap->ops->op) \
45 adap->ops->op(adap, ## arg); \
46 } while (0)
47
48static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
49{
50 int i;
51
52 for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
53 if (adap->log_addrs.log_addr[i] == log_addr)
54 return i;
55 return -1;
56}
57
58static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
59{
60 int i = cec_log_addr2idx(adap, log_addr);
61
62 return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
63}
64
/*
 * Queue a new event for this filehandle. If ts == 0, then set it
 * to the current time.
 *
 * We keep a queue of at most max_event events where max_event differs
 * per event. If the queue becomes full, then drop the oldest event and
 * keep track of the number of lost events.
 */
73void cec_queue_event_fh(struct cec_fh *fh,
74 const struct cec_event *new_ev, u64 ts)
75{
76 static const u16 max_events[CEC_NUM_EVENTS] = {
77 1, 1, 800, 800, 8, 8,
78 };
79 struct cec_event_entry *entry;
80 unsigned int ev_idx = new_ev->event - 1;
81
82 if (WARN_ON(ev_idx >= ARRAY_SIZE(fh->events)))
83 return;
84
85 if (ts == 0)
86 ts = ktime_get_ns();
87
88 mutex_lock(&fh->lock);
89 if (ev_idx < CEC_NUM_CORE_EVENTS)
90 entry = &fh->core_events[ev_idx];
91 else
92 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
93 if (entry) {
94 if (new_ev->event == CEC_EVENT_LOST_MSGS &&
95 fh->queued_events[ev_idx]) {
96 entry->ev.lost_msgs.lost_msgs +=
97 new_ev->lost_msgs.lost_msgs;
98 goto unlock;
99 }
100 entry->ev = *new_ev;
101 entry->ev.ts = ts;
102
103 if (fh->queued_events[ev_idx] < max_events[ev_idx]) {
104
105 list_add_tail(&entry->list, &fh->events[ev_idx]);
106 fh->queued_events[ev_idx]++;
107 fh->total_queued_events++;
108 goto unlock;
109 }
110
111 if (ev_idx >= CEC_NUM_CORE_EVENTS) {
112 list_add_tail(&entry->list, &fh->events[ev_idx]);
113
114 entry = list_first_entry(&fh->events[ev_idx],
115 struct cec_event_entry, list);
116 list_del(&entry->list);
117 kfree(entry);
118 }
119 }
120
121 entry = list_first_entry_or_null(&fh->events[ev_idx],
122 struct cec_event_entry, list);
123 if (entry)
124 entry->ev.flags |= CEC_EVENT_FL_DROPPED_EVENTS;
125
126unlock:
127 mutex_unlock(&fh->lock);
128 wake_up_interruptible(&fh->wait);
129}

/* Queue a new event for all open filehandles. */
132static void cec_queue_event(struct cec_adapter *adap,
133 const struct cec_event *ev)
134{
135 u64 ts = ktime_get_ns();
136 struct cec_fh *fh;
137
138 mutex_lock(&adap->devnode.lock);
139 list_for_each_entry(fh, &adap->devnode.fhs, list)
140 cec_queue_event_fh(fh, ev, ts);
141 mutex_unlock(&adap->devnode.lock);
142}

/* Notify userspace that the CEC pin changed state at the given time. */
145void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
146 bool dropped_events, ktime_t ts)
147{
148 struct cec_event ev = {
149 .event = is_high ? CEC_EVENT_PIN_CEC_HIGH :
150 CEC_EVENT_PIN_CEC_LOW,
151 .flags = dropped_events ? CEC_EVENT_FL_DROPPED_EVENTS : 0,
152 };
153 struct cec_fh *fh;
154
155 mutex_lock(&adap->devnode.lock);
156 list_for_each_entry(fh, &adap->devnode.fhs, list)
157 if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
158 cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
159 mutex_unlock(&adap->devnode.lock);
160}
161EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
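
/*
 * Example (a minimal, hypothetical sketch, not part of this framework): a
 * GPIO-based driver that monitors the CEC pin could report every edge from
 * its interrupt handler. The 'my_dev' structure, its 'adap' and 'cec_gpio'
 * members and the IRQ wiring are assumptions for illustration only.
 *
 *	static irqreturn_t my_cec_pin_irq(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *
 *		cec_queue_pin_cec_event(dev->adap,
 *					gpiod_get_value(dev->cec_gpio),
 *					false, ktime_get());
 *		return IRQ_HANDLED;
 *	}
 */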

/* Notify userspace that the HPD pin changed state at the given time. */
164void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
165{
166 struct cec_event ev = {
167 .event = is_high ? CEC_EVENT_PIN_HPD_HIGH :
168 CEC_EVENT_PIN_HPD_LOW,
169 };
170 struct cec_fh *fh;
171
172 mutex_lock(&adap->devnode.lock);
173 list_for_each_entry(fh, &adap->devnode.fhs, list)
174 cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
175 mutex_unlock(&adap->devnode.lock);
176}
177EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);

/*
 * Queue a new message for this filehandle.
 *
 * We keep a queue of at most CEC_MAX_MSG_RX_QUEUE_SZ messages. If the
 * queue becomes full, then drop the oldest message and keep track
 * of the number of lost messages.
 */
186static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
187{
188 static const struct cec_event ev_lost_msgs = {
189 .event = CEC_EVENT_LOST_MSGS,
190 .flags = 0,
191 {
192 .lost_msgs = { 1 },
193 },
194 };
195 struct cec_msg_entry *entry;
196
197 mutex_lock(&fh->lock);
198 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
199 if (entry) {
200 entry->msg = *msg;
201
202 list_add_tail(&entry->list, &fh->msgs);
203
204 if (fh->queued_msgs < CEC_MAX_MSG_RX_QUEUE_SZ) {
205
206 fh->queued_msgs++;
207 mutex_unlock(&fh->lock);
208 wake_up_interruptible(&fh->wait);
209 return;
210 }

		/*
		 * If the message queue is full, then drop the oldest one and
		 * send a lost message event.
		 */
216 entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list);
217 list_del(&entry->list);
218 kfree(entry);
219 }
220 mutex_unlock(&fh->lock);

	/*
	 * We lost a message, either because kmalloc failed or the queue
	 * was full.
	 */
226 cec_queue_event_fh(fh, &ev_lost_msgs, ktime_get_ns());
227}

/*
 * Queue the message for those filehandles that are in monitor mode.
 * If valid_la is true (this message is for us or was sent by us),
 * then pass it on to any monitoring filehandle. If this message
 * isn't for us or from us, then only give it to filehandles that
 * are in MONITOR_ALL mode.
 *
 * The latter can only happen if the CEC_CAP_MONITOR_ALL capability is
 * set and the CEC adapter was placed in 'monitor all' mode.
 */
239static void cec_queue_msg_monitor(struct cec_adapter *adap,
240 const struct cec_msg *msg,
241 bool valid_la)
242{
243 struct cec_fh *fh;
244 u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
245 CEC_MODE_MONITOR_ALL;
246
247 mutex_lock(&adap->devnode.lock);
248 list_for_each_entry(fh, &adap->devnode.fhs, list) {
249 if (fh->mode_follower >= monitor_mode)
250 cec_queue_msg_fh(fh, msg);
251 }
252 mutex_unlock(&adap->devnode.lock);
253}

/* Queue the message for (a subset of) the followers. */
258static void cec_queue_msg_followers(struct cec_adapter *adap,
259 const struct cec_msg *msg)
260{
261 struct cec_fh *fh;
262
263 mutex_lock(&adap->devnode.lock);
264 list_for_each_entry(fh, &adap->devnode.fhs, list) {
265 if (fh->mode_follower == CEC_MODE_FOLLOWER)
266 cec_queue_msg_fh(fh, msg);
267 }
268 mutex_unlock(&adap->devnode.lock);
269}

/* Notify userspace of an adapter state change. */
272static void cec_post_state_event(struct cec_adapter *adap)
273{
274 struct cec_event ev = {
275 .event = CEC_EVENT_STATE_CHANGE,
276 };
277
278 ev.state_change.phys_addr = adap->phys_addr;
279 ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
280 cec_queue_event(adap, &ev);
281}

/*
 * A CEC transmit (and a possible wait for a reply) completed.
 * If this was in blocking mode, then complete it, otherwise
 * queue the message for userspace to dequeue later.
 *
 * This function is called with adap->lock held.
 */
290static void cec_data_completed(struct cec_data *data)
291{
	/*
	 * Delete this transmit from the filehandle's xfer_list since
	 * it is no longer pending; the filehandle only tracks transmits
	 * that are still in flight.
	 */
300 if (data->fh)
301 list_del(&data->xfer_list);
302
303 if (data->blocking) {
		/*
		 * Someone is blocking, so mark the message as completed
		 * and call complete().
		 */
308 data->completed = true;
309 complete(&data->c);
310 } else {
		/*
		 * Nobody is blocking, so just queue the message for the
		 * filehandle (if any) and free the cec_data struct.
		 */
315 if (data->fh)
316 cec_queue_msg_fh(data->fh, &data->msg);
317 kfree(data);
318 }
319}

/*
 * A pending CEC transmit needs to be cancelled, either because the CEC
 * adapter is disabled or the transmit takes an impossibly long time to
 * finish.
 *
 * This function is called with adap->lock held.
 */
328static void cec_data_cancel(struct cec_data *data)
329{
	/*
	 * It's either the current transmit, or it is a pending
	 * transmit. Take the appropriate action to clear it.
	 */
334 if (data->adap->transmitting == data) {
335 data->adap->transmitting = NULL;
336 } else {
337 list_del_init(&data->list);
338 if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
339 data->adap->transmit_queue_sz--;
340 }
341
342 if (data->msg.tx_status & CEC_TX_STATUS_OK) {
343
344 data->msg.rx_ts = ktime_get_ns();
345 data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
346 } else {
347
348 data->msg.tx_ts = ktime_get_ns();
349 data->msg.tx_status |= CEC_TX_STATUS_ERROR |
350 CEC_TX_STATUS_MAX_RETRIES;
351 data->msg.tx_error_cnt++;
352 data->attempts = 0;
353 }

	/* Queue the canceled message for monitoring purposes */
356 cec_queue_msg_monitor(data->adap, &data->msg, 1);
357
358 cec_data_completed(data);
359}

/*
 * Flush all pending transmits and cancel any pending timeout work.
 *
 * This function is called with adap->lock held.
 */
366static void cec_flush(struct cec_adapter *adap)
367{
368 struct cec_data *data, *n;

	/*
	 * If the adapter is disabled, or we're asked to stop,
	 * then cancel any pending transmits.
	 */
374 while (!list_empty(&adap->transmit_queue)) {
375 data = list_first_entry(&adap->transmit_queue,
376 struct cec_data, list);
377 cec_data_cancel(data);
378 }
379 if (adap->transmitting)
380 cec_data_cancel(adap->transmitting);

	/* Cancel the pending timeout work for messages waiting for a reply. */
383 list_for_each_entry_safe(data, n, &adap->wait_queue, list) {
384 if (cancel_delayed_work(&data->work))
385 cec_data_cancel(data);
		/*
		 * If cancel_delayed_work returned false, then
		 * the cec_wait_timeout function is running,
		 * which will call cec_data_completed. So no
		 * need to do anything special in that case.
		 */
392 }
393}

/*
 * Main CEC state machine
 *
 * Wait until the thread should be stopped, or we are not transmitting and
 * a new transmit message is queued up, in which case we start transmitting
 * that message. When the adapter finished transmitting the message it will
 * call cec_transmit_done().
 *
 * If the adapter is disabled, then remove all queued messages instead.
 *
 * If the current transmit times out, then cancel that transmit.
 */
407int cec_thread_func(void *_adap)
408{
409 struct cec_adapter *adap = _adap;
410
411 for (;;) {
412 unsigned int signal_free_time;
413 struct cec_data *data;
414 bool timeout = false;
415 u8 attempts;
416
417 if (adap->transmitting) {
418 int err;

			/*
			 * We are transmitting a message, so add a timeout
			 * to prevent the state machine from getting stuck
			 * waiting for this message to finish, and check
			 * whether the adapter was disabled, in which case
			 * the transmit should be canceled.
			 */
427 err = wait_event_interruptible_timeout(adap->kthread_waitq,
428 (adap->needs_hpd &&
429 (!adap->is_configured && !adap->is_configuring)) ||
430 kthread_should_stop() ||
431 (!adap->transmitting &&
432 !list_empty(&adap->transmit_queue)),
433 msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
434 timeout = err == 0;
435 } else {
436
437 wait_event_interruptible(adap->kthread_waitq,
438 kthread_should_stop() ||
439 (!adap->transmitting &&
440 !list_empty(&adap->transmit_queue)));
441 }
442
443 mutex_lock(&adap->lock);
444
445 if ((adap->needs_hpd &&
446 (!adap->is_configured && !adap->is_configuring)) ||
447 kthread_should_stop()) {
448 cec_flush(adap);
449 goto unlock;
450 }
451
452 if (adap->transmitting && timeout) {
			/*
			 * If we timeout, then log that. Normally this does
			 * not happen and it is an indication of a faulty CEC
			 * adapter driver, or the CEC bus is in some weird
			 * state.
			 */
461 dprintk(1, "%s: message %*ph timed out\n", __func__,
462 adap->transmitting->msg.len,
463 adap->transmitting->msg.msg);
464 adap->tx_timeouts++;
465
466 cec_data_cancel(adap->transmitting);
467 goto unlock;
468 }

		/*
		 * If we are still transmitting, or there is nothing new to
		 * transmit, then just continue waiting.
		 */
474 if (adap->transmitting || list_empty(&adap->transmit_queue))
475 goto unlock;
476
477
478 data = list_first_entry(&adap->transmit_queue,
479 struct cec_data, list);
480 list_del_init(&data->list);
481 adap->transmit_queue_sz--;
482
483
484 adap->transmitting = data;

		/*
		 * Suggested number of attempts as per the CEC 2.0 spec:
		 * 4 attempts is the default, except for 'secondary poll
		 * messages', i.e. poll messages sent when the adapter is
		 * already configured, which use 2 attempts.
		 */
492 if (data->msg.len == 1 && adap->is_configured)
493 attempts = 2;
494 else
495 attempts = 4;

		/* Set the suggested signal free time */
		if (data->attempts) {
			/* should be >= 3 data bit periods for a retry */
			signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
		} else if (data->new_initiator) {
			/* should be >= 5 data bit periods for a new initiator */
			signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
		} else {
			/*
			 * should be >= 7 data bit periods for sending another
			 * frame immediately after another.
			 */
			signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
		}
		if (data->attempts == 0)
			data->attempts = attempts;

		/* Tell the adapter to transmit, cancel the transmit on error */
515 if (adap->ops->adap_transmit(adap, data->attempts,
516 signal_free_time, &data->msg))
517 cec_data_cancel(data);
518
519unlock:
520 mutex_unlock(&adap->lock);
521
522 if (kthread_should_stop())
523 break;
524 }
525 return 0;
526}

/*
 * Called by the CEC adapter if a transmit finished.
 */
531void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
532 u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt,
533 u8 error_cnt, ktime_t ts)
534{
535 struct cec_data *data;
536 struct cec_msg *msg;
537 unsigned int attempts_made = arb_lost_cnt + nack_cnt +
538 low_drive_cnt + error_cnt;
539
540 dprintk(2, "%s: status 0x%02x\n", __func__, status);
541 if (attempts_made < 1)
542 attempts_made = 1;
543
544 mutex_lock(&adap->lock);
545 data = adap->transmitting;
546 if (!data) {
		/*
		 * This can happen if a transmit was issued and the cable is
		 * unplugged while the transmit is ongoing. Ignore this
		 * transmit in that case.
		 */
552 dprintk(1, "%s was called without an ongoing transmit!\n",
553 __func__);
554 goto unlock;
555 }
556
557 msg = &data->msg;
558
559
560 WARN_ON(status == 0);
561 msg->tx_ts = ktime_to_ns(ts);
562 msg->tx_status |= status;
563 msg->tx_arb_lost_cnt += arb_lost_cnt;
564 msg->tx_nack_cnt += nack_cnt;
565 msg->tx_low_drive_cnt += low_drive_cnt;
566 msg->tx_error_cnt += error_cnt;
567
568
569 adap->transmitting = NULL;

	/*
	 * If there are still retry attempts left and there was an error and
	 * the hardware didn't signal that it retried itself (by setting
	 * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
	 */
576 if (data->attempts > attempts_made &&
577 !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
578
579 data->attempts -= attempts_made;
580 if (msg->timeout)
581 dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
582 msg->len, msg->msg, data->attempts, msg->reply);
583 else
584 dprintk(2, "retransmit: %*ph (attempts: %d)\n",
585 msg->len, msg->msg, data->attempts);
586
587 list_add(&data->list, &adap->transmit_queue);
588 adap->transmit_queue_sz++;
589 goto wake_thread;
590 }
591
592 data->attempts = 0;
593
594
595 if (!(status & CEC_TX_STATUS_OK))
596 msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;

	/* Queue the transmitted message for monitoring purposes */
599 cec_queue_msg_monitor(adap, msg, 1);
600
601 if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
602 msg->timeout) {
		/*
		 * Queue the message into the wait queue if we want to wait
		 * for a reply.
		 */
607 list_add_tail(&data->list, &adap->wait_queue);
608 schedule_delayed_work(&data->work,
609 msecs_to_jiffies(msg->timeout));
610 } else {
611
612 cec_data_completed(data);
613 }
614
615wake_thread:
	/*
	 * Wake up the main thread to see if another message is ready
	 * for transmitting or to retry the current message.
	 */
620 wake_up_interruptible(&adap->kthread_waitq);
621unlock:
622 mutex_unlock(&adap->lock);
623}
624EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
625
626void cec_transmit_attempt_done_ts(struct cec_adapter *adap,
627 u8 status, ktime_t ts)
628{
629 switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
630 case CEC_TX_STATUS_OK:
631 cec_transmit_done_ts(adap, status, 0, 0, 0, 0, ts);
632 return;
633 case CEC_TX_STATUS_ARB_LOST:
634 cec_transmit_done_ts(adap, status, 1, 0, 0, 0, ts);
635 return;
636 case CEC_TX_STATUS_NACK:
637 cec_transmit_done_ts(adap, status, 0, 1, 0, 0, ts);
638 return;
639 case CEC_TX_STATUS_LOW_DRIVE:
640 cec_transmit_done_ts(adap, status, 0, 0, 1, 0, ts);
641 return;
642 case CEC_TX_STATUS_ERROR:
643 cec_transmit_done_ts(adap, status, 0, 0, 0, 1, ts);
644 return;
645 default:
646
647 WARN(1, "cec-%s: invalid status 0x%02x\n", adap->name, status);
648 return;
649 }
650}
651EXPORT_SYMBOL_GPL(cec_transmit_attempt_done_ts);
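
/*
 * Example (hypothetical driver code, shown only as a sketch): a driver whose
 * hardware retries transmits internally and raises a single "transmit done"
 * interrupt can report the final status with the cec_transmit_attempt_done()
 * wrapper, which fills in the timestamp. The 'my_dev' structure and the
 * MY_TX_OK status bit are assumptions for illustration only.
 *
 *	static void my_tx_done_irq(struct my_dev *dev, u32 irq_status)
 *	{
 *		if (irq_status & MY_TX_OK)
 *			cec_transmit_attempt_done(dev->adap, CEC_TX_STATUS_OK);
 *		else
 *			cec_transmit_attempt_done(dev->adap, CEC_TX_STATUS_NACK);
 *	}
 */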
652
/*
 * Called when waiting for a reply times out.
 */
656static void cec_wait_timeout(struct work_struct *work)
657{
658 struct cec_data *data = container_of(work, struct cec_data, work.work);
659 struct cec_adapter *adap = data->adap;
660
661 mutex_lock(&adap->lock);

	/*
	 * Sanity check in case the timeout and the arrival of the message
	 * happened at the same time.
	 */
666 if (list_empty(&data->list))
667 goto unlock;
668
669
670 list_del_init(&data->list);
671 data->msg.rx_ts = ktime_get_ns();
672 data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
673 cec_data_completed(data);
674unlock:
675 mutex_unlock(&adap->lock);
676}
677
/*
 * Transmit a message. The fh argument may be NULL if the transmit is not
 * associated with a specific filehandle.
 *
 * This function is called with adap->lock held.
 */
684int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
685 struct cec_fh *fh, bool block)
686{
687 struct cec_data *data;
688 u8 last_initiator = 0xff;
689 unsigned int timeout;
690 int res = 0;
691
692 msg->rx_ts = 0;
693 msg->tx_ts = 0;
694 msg->rx_status = 0;
695 msg->tx_status = 0;
696 msg->tx_arb_lost_cnt = 0;
697 msg->tx_nack_cnt = 0;
698 msg->tx_low_drive_cnt = 0;
699 msg->tx_error_cnt = 0;
700 msg->sequence = 0;
701
702 if (msg->reply && msg->timeout == 0) {
703
704 msg->timeout = 1000;
705 }
706 if (msg->timeout)
707 msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
708 else
709 msg->flags = 0;
710
711 if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
712 msg->msg[2] = adap->phys_addr >> 8;
713 msg->msg[3] = adap->phys_addr & 0xff;
714 }
715
716
717 if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
718 dprintk(1, "%s: invalid length %d\n", __func__, msg->len);
719 return -EINVAL;
720 }
721
722 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
723
724 if (msg->timeout)
725 dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n",
726 __func__, msg->len, msg->msg, msg->reply,
727 !block ? ", nb" : "");
728 else
729 dprintk(2, "%s: %*ph%s\n",
730 __func__, msg->len, msg->msg, !block ? " (nb)" : "");
731
732 if (msg->timeout && msg->len == 1) {
733 dprintk(1, "%s: can't reply to poll msg\n", __func__);
734 return -EINVAL;
735 }
736 if (msg->len == 1) {
737 if (cec_msg_destination(msg) == 0xf) {
738 dprintk(1, "%s: invalid poll message\n", __func__);
739 return -EINVAL;
740 }
741 if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
			/*
			 * If the destination is a logical address we already
			 * claimed ourselves, then just NACK the poll here:
			 * what the hardware does with a poll to its own
			 * address varies, so handling it here keeps the
			 * behavior consistent.
			 */
750 msg->tx_ts = ktime_get_ns();
751 msg->tx_status = CEC_TX_STATUS_NACK |
752 CEC_TX_STATUS_MAX_RETRIES;
753 msg->tx_nack_cnt = 1;
754 msg->sequence = ++adap->sequence;
755 if (!msg->sequence)
756 msg->sequence = ++adap->sequence;
757 return 0;
758 }
759 }
760 if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
761 cec_has_log_addr(adap, cec_msg_destination(msg))) {
762 dprintk(1, "%s: destination is the adapter itself\n", __func__);
763 return -EINVAL;
764 }
765 if (msg->len > 1 && adap->is_configured &&
766 !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
767 dprintk(1, "%s: initiator has unknown logical address %d\n",
768 __func__, cec_msg_initiator(msg));
769 return -EINVAL;
770 }
771 if (!adap->is_configured && !adap->is_configuring) {
772 if (adap->needs_hpd || msg->msg[0] != 0xf0) {
773 dprintk(1, "%s: adapter is unconfigured\n", __func__);
774 return -ENONET;
775 }
776 if (msg->reply) {
777 dprintk(1, "%s: invalid msg->reply\n", __func__);
778 return -EINVAL;
779 }
780 }
781
782 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
783 dprintk(1, "%s: transmit queue full\n", __func__);
784 return -EBUSY;
785 }
786
787 data = kzalloc(sizeof(*data), GFP_KERNEL);
788 if (!data)
789 return -ENOMEM;
790
791 msg->sequence = ++adap->sequence;
792 if (!msg->sequence)
793 msg->sequence = ++adap->sequence;
794
795 data->msg = *msg;
796 data->fh = fh;
797 data->adap = adap;
798 data->blocking = block;
799
	/*
	 * Determine if this message follows a message from the same
	 * initiator. Needed to determine the free signal time later on.
	 */
804 if (msg->len > 1) {
805 if (!(list_empty(&adap->transmit_queue))) {
806 const struct cec_data *last;
807
808 last = list_last_entry(&adap->transmit_queue,
809 const struct cec_data, list);
810 last_initiator = cec_msg_initiator(&last->msg);
811 } else if (adap->transmitting) {
812 last_initiator =
813 cec_msg_initiator(&adap->transmitting->msg);
814 }
815 }
816 data->new_initiator = last_initiator != cec_msg_initiator(msg);
817 init_completion(&data->c);
818 INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
819
820 if (fh)
821 list_add_tail(&data->xfer_list, &fh->xfer_list);
822
823 list_add_tail(&data->list, &adap->transmit_queue);
824 adap->transmit_queue_sz++;
825 if (!adap->transmitting)
826 wake_up_interruptible(&adap->kthread_waitq);

	/* All done if we don't need to block waiting for completion */
	if (!block)
		return 0;

	/*
	 * If we don't get a completion before this time something is really
	 * wrong and we time out.
	 */
	timeout = CEC_XFER_TIMEOUT_MS;
	/* Add the requested timeout if we have to wait for a reply as well */
	if (msg->timeout)
		timeout += msg->timeout;

	/*
	 * Release the lock and wait, reacquire the lock afterwards.
	 */
844 mutex_unlock(&adap->lock);
845 res = wait_for_completion_killable_timeout(&data->c,
846 msecs_to_jiffies(timeout));
847 mutex_lock(&adap->lock);
848
849 if (data->completed) {
850
851 *msg = data->msg;
852 kfree(data);
853 return 0;
854 }

	/*
	 * The wait for completion timed out or was interrupted, so mark this
	 * as non-blocking and disconnect it from the filehandle since it is
	 * no longer waiting for a reply.
	 */
861 data->blocking = false;
862 if (data->fh)
863 list_del(&data->xfer_list);
864 data->fh = NULL;
865
866 if (res == 0) {
867
868 if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
869 msg->rx_status = CEC_RX_STATUS_TIMEOUT;
870 else
871 msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
872 }
873 return res > 0 ? 0 : res;
874}

/* Helper function to be used by drivers and this framework. */
877int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
878 bool block)
879{
880 int ret;
881
882 mutex_lock(&adap->lock);
883 ret = cec_transmit_msg_fh(adap, msg, NULL, block);
884 mutex_unlock(&adap->lock);
885 return ret;
886}
887EXPORT_SYMBOL_GPL(cec_transmit_msg);
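
/*
 * Example (hypothetical driver code, a minimal sketch): a driver that wants
 * to put the TV in standby can build the message with the cec_msg_* helpers
 * and transmit it, blocking until the transmit is done. Error handling is
 * omitted and 'adap' is assumed to be a configured adapter.
 *
 *	struct cec_msg msg = {};
 *
 *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
 *	cec_msg_standby(&msg);
 *	err = cec_transmit_msg(adap, &msg, true);
 */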
888
/*
 * cec_receive_notify() implements the high-level protocol handling and is
 * defined further down; it is forward-declared here so that the low-level
 * receive function below can call it.
 */
894static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
895 bool is_reply);
896
897#define DIRECTED 0x80
898#define BCAST1_4 0x40
899#define BCAST2_0 0x20
900#define BCAST (BCAST1_4 | BCAST2_0)
901#define BOTH (BCAST | DIRECTED)

/*
 * Specify the minimum length and whether the message is directed, broadcast
 * or both. Messages that do not match these criteria are ignored as
 * required by the CEC specification.
 */
908static const u8 cec_msg_size[256] = {
909 [CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
910 [CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
911 [CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
912 [CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
913 [CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
914 [CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
915 [CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
916 [CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
917 [CEC_MSG_STANDBY] = 2 | BOTH,
918 [CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
919 [CEC_MSG_RECORD_ON] = 3 | DIRECTED,
920 [CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
921 [CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
922 [CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
923 [CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
924 [CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
925 [CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
926 [CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
927 [CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
928 [CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
929 [CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
930 [CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
931 [CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
932 [CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
933 [CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
934 [CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
935 [CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
936 [CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
937 [CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
938 [CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
939 [CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
940 [CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
941 [CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
942 [CEC_MSG_PLAY] = 3 | DIRECTED,
943 [CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
944 [CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
945 [CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
946 [CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
947 [CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
948 [CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
949 [CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
950 [CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
951 [CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
952 [CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
953 [CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
954 [CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
955 [CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
956 [CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
957 [CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
958 [CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
959 [CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
960 [CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
961 [CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
962 [CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
963 [CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
964 [CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
965 [CEC_MSG_ABORT] = 2 | DIRECTED,
966 [CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
967 [CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
968 [CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
969 [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
970 [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
971 [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
972 [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
973 [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
974 [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
975 [CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
976 [CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
977 [CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
978 [CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
979 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
980 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
981 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
982 [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
983 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
984};

/* Called by the CEC adapter if a message is received */
987void cec_received_msg_ts(struct cec_adapter *adap,
988 struct cec_msg *msg, ktime_t ts)
989{
990 struct cec_data *data;
991 u8 msg_init = cec_msg_initiator(msg);
992 u8 msg_dest = cec_msg_destination(msg);
993 u8 cmd = msg->msg[1];
994 bool is_reply = false;
995 bool valid_la = true;
996 u8 min_len = 0;
997
998 if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
999 return;

	/*
	 * Some CEC adapters will receive the messages that they transmitted.
	 * This test filters out those messages by checking if we are the
	 * initiator, and just returning in that case.
	 *
	 * Note that this won't work if this is an Unregistered device.
	 *
	 * It is bad practice if the hardware receives the message that it
	 * transmitted; luckily most CEC adapters behave correctly in this
	 * respect.
	 */
1012 if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
1013 cec_has_log_addr(adap, msg_init))
1014 return;
1015
1016 msg->rx_ts = ktime_to_ns(ts);
1017 msg->rx_status = CEC_RX_STATUS_OK;
1018 msg->sequence = msg->reply = msg->timeout = 0;
1019 msg->tx_status = 0;
1020 msg->tx_ts = 0;
1021 msg->tx_arb_lost_cnt = 0;
1022 msg->tx_nack_cnt = 0;
1023 msg->tx_low_drive_cnt = 0;
1024 msg->tx_error_cnt = 0;
1025 msg->flags = 0;
1026 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
1027
1028 mutex_lock(&adap->lock);
1029 dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
1030
1031
1032 if (!cec_msg_is_broadcast(msg))
1033 valid_la = cec_has_log_addr(adap, msg_dest);
1034
	/*
	 * Check if the length is too short, or if the message is a
	 * broadcast message where a directed message was expected or
	 * vice versa. If so, then the message has to be ignored as
	 * the CEC specification requires.
	 */
1041 if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
1042 u8 dir_fl = cec_msg_size[cmd] & BOTH;
1043
1044 min_len = cec_msg_size[cmd] & 0x1f;
1045 if (msg->len < min_len)
1046 valid_la = false;
1047 else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
1048 valid_la = false;
1049 else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
1050 valid_la = false;
1051 else if (cec_msg_is_broadcast(msg) &&
1052 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
1053 !(dir_fl & BCAST2_0))
1054 valid_la = false;
1055 }
1056 if (valid_la && min_len) {
		/* These messages have special length requirements */
1058 switch (cmd) {
1059 case CEC_MSG_TIMER_STATUS:
1060 if (msg->msg[2] & 0x10) {
1061 switch (msg->msg[2] & 0xf) {
1062 case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
1063 case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
1064 if (msg->len < 5)
1065 valid_la = false;
1066 break;
1067 }
1068 } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
1069 if (msg->len < 5)
1070 valid_la = false;
1071 }
1072 break;
1073 case CEC_MSG_RECORD_ON:
1074 switch (msg->msg[2]) {
1075 case CEC_OP_RECORD_SRC_OWN:
1076 break;
1077 case CEC_OP_RECORD_SRC_DIGITAL:
1078 if (msg->len < 10)
1079 valid_la = false;
1080 break;
1081 case CEC_OP_RECORD_SRC_ANALOG:
1082 if (msg->len < 7)
1083 valid_la = false;
1084 break;
1085 case CEC_OP_RECORD_SRC_EXT_PLUG:
1086 if (msg->len < 4)
1087 valid_la = false;
1088 break;
1089 case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
1090 if (msg->len < 5)
1091 valid_la = false;
1092 break;
1093 }
1094 break;
1095 }
1096 }

	/* It's a valid message and not a poll or CDC message */
1099 if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
1100 bool abort = cmd == CEC_MSG_FEATURE_ABORT;
1101
		/* The aborted command is in msg[2] */
1103 if (abort)
1104 cmd = msg->msg[2];
1105
		/*
		 * Walk over all transmitted messages that are waiting for a
		 * reply.
		 */
1110 list_for_each_entry(data, &adap->wait_queue, list) {
1111 struct cec_msg *dst = &data->msg;
1112

			/*
			 * The *only* CEC message that has two possible replies
			 * is CEC_MSG_INITIATE_ARC.
			 * In this case allow either of the two replies.
			 */
1118 if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
1119 (cmd == CEC_MSG_REPORT_ARC_INITIATED ||
1120 cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
1121 (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
1122 dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
1123 dst->reply = cmd;
1124
1125
1126 if ((abort && cmd != dst->msg[1]) ||
1127 (!abort && cmd != dst->reply))
1128 continue;
1129
1130
1131 if (msg_init != cec_msg_destination(dst) &&
1132 !cec_msg_is_broadcast(dst))
1133 continue;
1134
1135
1136 memcpy(dst->msg, msg->msg, msg->len);
1137 dst->len = msg->len;
1138 dst->rx_ts = msg->rx_ts;
1139 dst->rx_status = msg->rx_status;
1140 if (abort)
1141 dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
1142 msg->flags = dst->flags;
1143
1144 list_del_init(&data->list);

			/* Cancel the pending timeout work */
1147 if (!cancel_delayed_work(&data->work)) {
1148 mutex_unlock(&adap->lock);
1149 flush_scheduled_work();
1150 mutex_lock(&adap->lock);
1151 }

			/*
			 * Mark this as a reply, provided someone is still
			 * waiting for the answer.
			 */
1156 if (data->fh)
1157 is_reply = true;
1158 cec_data_completed(data);
1159 break;
1160 }
1161 }
1162 mutex_unlock(&adap->lock);
1163
1164
1165 cec_queue_msg_monitor(adap, msg, valid_la);
1166
1167
1168 if (!valid_la || msg->len <= 1)
1169 return;
1170
1171 if (adap->log_addrs.log_addr_mask == 0)
1172 return;

	/*
	 * Process the message on the protocol level. If is_reply is true,
	 * then cec_receive_notify() won't pass on the reply to the listener(s)
	 * since that was already done by cec_data_completed() above.
	 */
1179 cec_receive_notify(adap, msg, is_reply);
1180}
1181EXPORT_SYMBOL_GPL(cec_received_msg_ts);
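
/*
 * Example (hypothetical driver code, a minimal sketch): a driver that gets a
 * "message received" interrupt copies the bytes from its hardware FIFO into
 * a struct cec_msg and hands it to the framework via the cec_received_msg()
 * wrapper, which timestamps it. The 'my_dev' structure and my_read_fifo()
 * are assumptions for illustration only.
 *
 *	static void my_rx_irq(struct my_dev *dev)
 *	{
 *		struct cec_msg msg = {};
 *
 *		msg.len = my_read_fifo(dev, msg.msg, CEC_MAX_MSG_SIZE);
 *		cec_received_msg(dev->adap, &msg);
 *	}
 */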
1182
/*
 * Try to claim a specific logical address.
 *
 * This function is called with adap->lock held.
 */
1190static int cec_config_log_addr(struct cec_adapter *adap,
1191 unsigned int idx,
1192 unsigned int log_addr)
1193{
1194 struct cec_log_addrs *las = &adap->log_addrs;
1195 struct cec_msg msg = { };
1196 int err;
1197
1198 if (cec_has_log_addr(adap, log_addr))
1199 return 0;
1200
1201
1202 msg.len = 1;
1203 msg.msg[0] = (log_addr << 4) | log_addr;
1204 err = cec_transmit_msg_fh(adap, &msg, NULL, true);

	/*
	 * While trying to poll the physical address was reset
	 * and the adapter was unconfigured, so bail out.
	 */
1210 if (!adap->is_configuring)
1211 return -EINTR;
1212
1213 if (err)
1214 return err;
1215
1216 if (msg.tx_status & CEC_TX_STATUS_OK)
1217 return 0;

	/*
	 * Message not acknowledged, so this logical
	 * address is free to use.
	 */
1223 err = adap->ops->adap_log_addr(adap, log_addr);
1224 if (err)
1225 return err;
1226
1227 las->log_addr[idx] = log_addr;
1228 las->log_addr_mask |= 1 << log_addr;
1229 adap->phys_addrs[log_addr] = adap->phys_addr;
1230 return 1;
1231}

/*
 * Unconfigure the adapter: clear all logical addresses and send
 * the state changed event.
 *
 * This function is called with adap->lock held.
 */
1239static void cec_adap_unconfigure(struct cec_adapter *adap)
1240{
1241 if (!adap->needs_hpd ||
1242 adap->phys_addr != CEC_PHYS_ADDR_INVALID)
1243 WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
1244 adap->log_addrs.log_addr_mask = 0;
1245 adap->is_configuring = false;
1246 adap->is_configured = false;
1247 memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
1248 cec_flush(adap);
1249 wake_up_interruptible(&adap->kthread_waitq);
1250 cec_post_state_event(adap);
1251}

/*
 * Attempt to claim the required logical addresses.
 */
1256static int cec_config_thread_func(void *arg)
1257{
1258
1259 static const u8 tv_log_addrs[] = {
1260 CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
1261 CEC_LOG_ADDR_INVALID
1262 };
1263 static const u8 record_log_addrs[] = {
1264 CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
1265 CEC_LOG_ADDR_RECORD_3,
1266 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1267 CEC_LOG_ADDR_INVALID
1268 };
1269 static const u8 tuner_log_addrs[] = {
1270 CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
1271 CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
1272 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1273 CEC_LOG_ADDR_INVALID
1274 };
1275 static const u8 playback_log_addrs[] = {
1276 CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
1277 CEC_LOG_ADDR_PLAYBACK_3,
1278 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1279 CEC_LOG_ADDR_INVALID
1280 };
1281 static const u8 audiosystem_log_addrs[] = {
1282 CEC_LOG_ADDR_AUDIOSYSTEM,
1283 CEC_LOG_ADDR_INVALID
1284 };
1285 static const u8 specific_use_log_addrs[] = {
1286 CEC_LOG_ADDR_SPECIFIC,
1287 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1288 CEC_LOG_ADDR_INVALID
1289 };
1290 static const u8 *type2addrs[6] = {
1291 [CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
1292 [CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
1293 [CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
1294 [CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
1295 [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
1296 [CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
1297 };
1298 static const u16 type2mask[] = {
1299 [CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
1300 [CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
1301 [CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
1302 [CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
1303 [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
1304 [CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
1305 };
1306 struct cec_adapter *adap = arg;
1307 struct cec_log_addrs *las = &adap->log_addrs;
1308 int err;
1309 int i, j;
1310
1311 mutex_lock(&adap->lock);
1312 dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
1313 cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
1314 las->log_addr_mask = 0;
1315
1316 if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
1317 goto configured;
1318
1319 for (i = 0; i < las->num_log_addrs; i++) {
1320 unsigned int type = las->log_addr_type[i];
1321 const u8 *la_list;
1322 u8 last_la;

		/*
		 * The TV functionality can only map to physical address 0.
		 * For any other address, try the Specific functionality
		 * instead as per the spec.
		 */
1329 if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
1330 type = CEC_LOG_ADDR_TYPE_SPECIFIC;
1331
1332 la_list = type2addrs[type];
1333 last_la = las->log_addr[i];
1334 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1335 if (last_la == CEC_LOG_ADDR_INVALID ||
1336 last_la == CEC_LOG_ADDR_UNREGISTERED ||
1337 !((1 << last_la) & type2mask[type]))
1338 last_la = la_list[0];
1339
1340 err = cec_config_log_addr(adap, i, last_la);
1341 if (err > 0)
1342 continue;
1343
1344 if (err < 0)
1345 goto unconfigure;
1346
1347 for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
1348
1349 if (la_list[j] == last_la)
1350 continue;
1351
1352 if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
1353 la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
1354 las->cec_version < CEC_OP_CEC_VERSION_2_0)
1355 continue;
1356
1357 err = cec_config_log_addr(adap, i, la_list[j]);
1358 if (err == 0)
1359 continue;
1360 if (err < 0)
1361 goto unconfigure;
1362
1363 break;
1364 }
1365
1366 if (la_list[j] == CEC_LOG_ADDR_INVALID)
1367 dprintk(1, "could not claim LA %d\n", i);
1368 }
1369
1370 if (adap->log_addrs.log_addr_mask == 0 &&
1371 !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
1372 goto unconfigure;
1373
1374configured:
1375 if (adap->log_addrs.log_addr_mask == 0) {
1376
1377 las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
1378 las->log_addr_mask = 1 << las->log_addr[0];
1379 for (i = 1; i < las->num_log_addrs; i++)
1380 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1381 }
1382 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1383 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1384 adap->is_configured = true;
1385 adap->is_configuring = false;
1386 cec_post_state_event(adap);

	/*
	 * Now post the Report Features and Report Physical Address broadcast
	 * messages. Note that these are non-blocking transmits, meaning that
	 * they are just queued up and once adap->lock is unlocked the main
	 * thread will kick in and start transmitting these.
	 *
	 * If after this function is done (but before one or more of these
	 * messages are actually transmitted) the CEC adapter is unconfigured,
	 * then any remaining messages will be dropped by the main thread.
	 */
1398 for (i = 0; i < las->num_log_addrs; i++) {
1399 struct cec_msg msg = {};
1400
1401 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
1402 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
1403 continue;
1404
1405 msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;

		/* Report Features must come first according to CEC 2.0 */
1408 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
1409 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
1410 cec_fill_msg_report_features(adap, &msg, i);
1411 cec_transmit_msg_fh(adap, &msg, NULL, false);
1412 }
1413
1414
1415 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1416 las->primary_device_type[i]);
1417 dprintk(1, "config: la %d pa %x.%x.%x.%x\n",
1418 las->log_addr[i],
1419 cec_phys_addr_exp(adap->phys_addr));
1420 cec_transmit_msg_fh(adap, &msg, NULL, false);
1421 }
1422 adap->kthread_config = NULL;
1423 complete(&adap->config_completion);
1424 mutex_unlock(&adap->lock);
1425 return 0;
1426
1427unconfigure:
1428 for (i = 0; i < las->num_log_addrs; i++)
1429 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1430 cec_adap_unconfigure(adap);
1431 adap->kthread_config = NULL;
1432 mutex_unlock(&adap->lock);
1433 complete(&adap->config_completion);
1434 return 0;
1435}

/*
 * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
 * logical addresses.
 *
 * This function is called with adap->lock held.
 */
1443static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
1444{
1445 if (WARN_ON(adap->is_configuring || adap->is_configured))
1446 return;
1447
1448 init_completion(&adap->config_completion);
1449
1450
1451 adap->is_configuring = true;
1452 adap->kthread_config = kthread_run(cec_config_thread_func, adap,
1453 "ceccfg-%s", adap->name);
1454 if (IS_ERR(adap->kthread_config)) {
1455 adap->kthread_config = NULL;
1456 } else if (block) {
1457 mutex_unlock(&adap->lock);
1458 wait_for_completion(&adap->config_completion);
1459 mutex_lock(&adap->lock);
1460 }
1461}
1462
/*
 * Set a new physical address.
 *
 * This function is called with adap->lock held.
 */
1467void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1468{
1469 if (phys_addr == adap->phys_addr)
1470 return;
1471 if (phys_addr != CEC_PHYS_ADDR_INVALID && adap->devnode.unregistered)
1472 return;
1473
1474 dprintk(1, "new physical address %x.%x.%x.%x\n",
1475 cec_phys_addr_exp(phys_addr));
1476 if (phys_addr == CEC_PHYS_ADDR_INVALID ||
1477 adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
1478 adap->phys_addr = CEC_PHYS_ADDR_INVALID;
1479 cec_post_state_event(adap);
1480 cec_adap_unconfigure(adap);
1481
1482 if (adap->monitor_all_cnt)
1483 WARN_ON(call_op(adap, adap_monitor_all_enable, false));
1484 mutex_lock(&adap->devnode.lock);
1485 if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1486 WARN_ON(adap->ops->adap_enable(adap, false));
1487 mutex_unlock(&adap->devnode.lock);
1488 if (phys_addr == CEC_PHYS_ADDR_INVALID)
1489 return;
1490 }
1491
1492 mutex_lock(&adap->devnode.lock);
1493 if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
1494 adap->ops->adap_enable(adap, true)) {
1495 mutex_unlock(&adap->devnode.lock);
1496 return;
1497 }
1498
1499 if (adap->monitor_all_cnt &&
1500 call_op(adap, adap_monitor_all_enable, true)) {
1501 if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1502 WARN_ON(adap->ops->adap_enable(adap, false));
1503 mutex_unlock(&adap->devnode.lock);
1504 return;
1505 }
1506 mutex_unlock(&adap->devnode.lock);
1507
1508 adap->phys_addr = phys_addr;
1509 cec_post_state_event(adap);
1510 if (adap->log_addrs.num_log_addrs)
1511 cec_claim_log_addrs(adap, block);
1512}
1513
1514void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1515{
1516 if (IS_ERR_OR_NULL(adap))
1517 return;
1518
1519 mutex_lock(&adap->lock);
1520 __cec_s_phys_addr(adap, phys_addr, block);
1521 mutex_unlock(&adap->lock);
1522}
1523EXPORT_SYMBOL_GPL(cec_s_phys_addr);
1524
1525void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
1526 const struct edid *edid)
1527{
1528 u16 pa = CEC_PHYS_ADDR_INVALID;
1529
1530 if (edid && edid->extensions)
1531 pa = cec_get_edid_phys_addr((const u8 *)edid,
1532 EDID_LENGTH * (edid->extensions + 1), NULL);
1533 cec_s_phys_addr(adap, pa, false);
1534}
1535EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid);
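
/*
 * Example (hypothetical driver code, a minimal sketch): an HDMI source driver
 * typically calls this from its hotplug handling, and invalidates the
 * physical address again when the cable is disconnected. The 'my_dev'
 * structure and my_read_edid() are assumptions for illustration only.
 *
 *	static void my_hpd_changed(struct my_dev *dev, bool connected)
 *	{
 *		if (connected)
 *			cec_s_phys_addr_from_edid(dev->adap, my_read_edid(dev));
 *		else
 *			cec_s_phys_addr(dev->adap, CEC_PHYS_ADDR_INVALID, false);
 *	}
 */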
1536
/*
 * Called from either the ioctl or a driver to set the logical addresses.
 *
 * This function is called with adap->lock held.
 */
1542int __cec_s_log_addrs(struct cec_adapter *adap,
1543 struct cec_log_addrs *log_addrs, bool block)
1544{
1545 u16 type_mask = 0;
1546 int i;
1547
1548 if (adap->devnode.unregistered)
1549 return -ENODEV;
1550
1551 if (!log_addrs || log_addrs->num_log_addrs == 0) {
1552 cec_adap_unconfigure(adap);
1553 adap->log_addrs.num_log_addrs = 0;
1554 for (i = 0; i < CEC_MAX_LOG_ADDRS; i++)
1555 adap->log_addrs.log_addr[i] = CEC_LOG_ADDR_INVALID;
1556 adap->log_addrs.osd_name[0] = '\0';
1557 adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
1558 adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
1559 return 0;
1560 }
1561
1562 if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
		/*
		 * Sanitize the log_addrs fields if a CDC-Only device is
		 * requested.
		 */
1567 log_addrs->num_log_addrs = 1;
1568 log_addrs->osd_name[0] = '\0';
1569 log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
1570 log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;

		/*
		 * This is just an internal convention since a CDC-Only device
		 * doesn't have to be a switch. But switches already use
		 * unregistered, so it makes some kind of sense to pick this
		 * as the primary device. Since a CDC-Only device never sends
		 * any 'normal' CEC messages this primary device type is never
		 * seen by any CEC device anyway.
		 */
1579 log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
1580 log_addrs->all_device_types[0] = 0;
1581 log_addrs->features[0][0] = 0;
1582 log_addrs->features[0][1] = 0;
1583 }
1584
1585
1586 log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
1587
1588
1589 if (log_addrs->num_log_addrs > adap->available_log_addrs) {
1590 dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
1591 return -EINVAL;
1592 }

	/*
	 * Vendor ID is a 24 bit number, so check if the value is
	 * within the correct range.
	 */
1598 if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
1599 (log_addrs->vendor_id & 0xff000000) != 0) {
1600 dprintk(1, "invalid vendor ID\n");
1601 return -EINVAL;
1602 }
1603
1604 if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
1605 log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0) {
1606 dprintk(1, "invalid CEC version\n");
1607 return -EINVAL;
1608 }
1609
1610 if (log_addrs->num_log_addrs > 1)
1611 for (i = 0; i < log_addrs->num_log_addrs; i++)
1612 if (log_addrs->log_addr_type[i] ==
1613 CEC_LOG_ADDR_TYPE_UNREGISTERED) {
1614 dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
1615 return -EINVAL;
1616 }
1617
1618 for (i = 0; i < log_addrs->num_log_addrs; i++) {
1619 const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
1620 u8 *features = log_addrs->features[i];
1621 bool op_is_dev_features = false;
1622 unsigned j;
1623
1624 log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
1625 if (type_mask & (1 << log_addrs->log_addr_type[i])) {
1626 dprintk(1, "duplicate logical address type\n");
1627 return -EINVAL;
1628 }
1629 type_mask |= 1 << log_addrs->log_addr_type[i];
1630 if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
1631 (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
1632
1633 dprintk(1, "invalid record + playback combination\n");
1634 return -EINVAL;
1635 }
1636 if (log_addrs->primary_device_type[i] >
1637 CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
1638 dprintk(1, "unknown primary device type\n");
1639 return -EINVAL;
1640 }
1641 if (log_addrs->primary_device_type[i] == 2) {
1642 dprintk(1, "invalid primary device type\n");
1643 return -EINVAL;
1644 }
1645 if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
1646 dprintk(1, "unknown logical address type\n");
1647 return -EINVAL;
1648 }
1649 for (j = 0; j < feature_sz; j++) {
1650 if ((features[j] & 0x80) == 0) {
1651 if (op_is_dev_features)
1652 break;
1653 op_is_dev_features = true;
1654 }
1655 }
1656 if (!op_is_dev_features || j == feature_sz) {
1657 dprintk(1, "malformed features\n");
1658 return -EINVAL;
1659 }
1660
1661 memset(features + j + 1, 0, feature_sz - j - 1);
1662 }
1663
1664 if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
1665 if (log_addrs->num_log_addrs > 2) {
1666 dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
1667 return -EINVAL;
1668 }
1669 if (log_addrs->num_log_addrs == 2) {
1670 if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
1671 (1 << CEC_LOG_ADDR_TYPE_TV)))) {
1672 dprintk(1, "two LAs is only allowed for audiosystem and TV\n");
1673 return -EINVAL;
1674 }
1675 if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
1676 (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
1677 dprintk(1, "an audiosystem/TV can only be combined with record or playback\n");
1678 return -EINVAL;
1679 }
1680 }
1681 }
1682
1683
1684 for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
1685 log_addrs->primary_device_type[i] = 0;
1686 log_addrs->log_addr_type[i] = 0;
1687 log_addrs->all_device_types[i] = 0;
1688 memset(log_addrs->features[i], 0,
1689 sizeof(log_addrs->features[i]));
1690 }
1691
1692 log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
1693 adap->log_addrs = *log_addrs;
1694 if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
1695 cec_claim_log_addrs(adap, block);
1696 return 0;
1697}
1698
1699int cec_s_log_addrs(struct cec_adapter *adap,
1700 struct cec_log_addrs *log_addrs, bool block)
1701{
1702 int err;
1703
1704 mutex_lock(&adap->lock);
1705 err = __cec_s_log_addrs(adap, log_addrs, block);
1706 mutex_unlock(&adap->lock);
1707 return err;
1708}
1709EXPORT_SYMBOL_GPL(cec_s_log_addrs);
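
/*
 * Example (hypothetical driver code, a minimal sketch): a driver that claims
 * its logical addresses itself (i.e. it did not set CEC_CAP_LOG_ADDRS) could
 * fill in a single playback logical address like this. Error handling is
 * omitted and 'adap' is an assumption for illustration only.
 *
 *	struct cec_log_addrs log_addrs = {};
 *
 *	log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
 *	log_addrs.num_log_addrs = 1;
 *	log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
 *	log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
 *	log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
 *	log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
 *	strscpy(log_addrs.osd_name, "my device", sizeof(log_addrs.osd_name));
 *	err = cec_s_log_addrs(adap, &log_addrs, false);
 */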

/* High-level core CEC message handling */

/* Fill in the Report Features message */
1714static void cec_fill_msg_report_features(struct cec_adapter *adap,
1715 struct cec_msg *msg,
1716 unsigned int la_idx)
1717{
1718 const struct cec_log_addrs *las = &adap->log_addrs;
1719 const u8 *features = las->features[la_idx];
1720 bool op_is_dev_features = false;
1721 unsigned int idx;
1722
1723
1724 msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1725 msg->len = 4;
1726 msg->msg[1] = CEC_MSG_REPORT_FEATURES;
1727 msg->msg[2] = adap->log_addrs.cec_version;
1728 msg->msg[3] = las->all_device_types[la_idx];
1729
1730
1731 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
1732 msg->msg[msg->len++] = features[idx];
1733 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
1734 if (op_is_dev_features)
1735 break;
1736 op_is_dev_features = true;
1737 }
1738 }
1739}
1740
1741
1742static int cec_feature_abort_reason(struct cec_adapter *adap,
1743 struct cec_msg *msg, u8 reason)
1744{
1745 struct cec_msg tx_msg = { };

	/*
	 * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
	 * message!
	 */
1751 if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
1752 return 0;
1753
1754 if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED)
1755 return 0;
1756 cec_msg_set_reply_to(&tx_msg, msg);
1757 cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
1758 return cec_transmit_msg(adap, &tx_msg, false);
1759}
1760
1761static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
1762{
1763 return cec_feature_abort_reason(adap, msg,
1764 CEC_OP_ABORT_UNRECOGNIZED_OP);
1765}
1766
1767static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
1768{
1769 return cec_feature_abort_reason(adap, msg,
1770 CEC_OP_ABORT_REFUSED);
1771}
1772
/*
 * Called when a CEC message is received. This function will do any
 * necessary core processing. The is_reply bool is true if this message
 * is a reply to an earlier transmit.
 *
 * The message is either a broadcast message or a valid directed message.
 */
1780static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1781 bool is_reply)
1782{
1783 bool is_broadcast = cec_msg_is_broadcast(msg);
1784 u8 dest_laddr = cec_msg_destination(msg);
1785 u8 init_laddr = cec_msg_initiator(msg);
1786 u8 devtype = cec_log_addr2dev(adap, dest_laddr);
1787 int la_idx = cec_log_addr2idx(adap, dest_laddr);
1788 bool from_unregistered = init_laddr == 0xf;
1789 struct cec_msg tx_cec_msg = { };
1790
1791 dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);

	/* If this is a CDC-Only device, then ignore any non-CDC messages */
1794 if (cec_is_cdc_only(&adap->log_addrs) &&
1795 msg->msg[1] != CEC_MSG_CDC_MESSAGE)
1796 return 0;
1797
1798 if (adap->ops->received) {
1799
1800 if (adap->ops->received(adap, msg) != -ENOMSG)
1801 return 0;
1802 }

	/*
	 * REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
	 * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
	 * handled by the CEC core, even if the passthrough mode is on.
	 * The others are just ignored if passthrough mode is on.
	 */
1810 switch (msg->msg[1]) {
1811 case CEC_MSG_GET_CEC_VERSION:
1812 case CEC_MSG_ABORT:
1813 case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
1814 case CEC_MSG_GIVE_OSD_NAME:
		/*
		 * These messages reply with a directed message, so ignore if
		 * the initiator is Unregistered.
		 */
1819 if (!adap->passthrough && from_unregistered)
1820 return 0;
		/* Fall through */
1822 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1823 case CEC_MSG_GIVE_FEATURES:
1824 case CEC_MSG_GIVE_PHYSICAL_ADDR:
		/*
		 * Skip processing these messages if the passthrough mode
		 * is on.
		 */
1829 if (adap->passthrough)
1830 goto skip_processing;
1831
1832 if (is_broadcast)
1833 return 0;
1834 break;
1835
1836 case CEC_MSG_USER_CONTROL_PRESSED:
1837 case CEC_MSG_USER_CONTROL_RELEASED:
		/* Wrong addressing mode: don't process */
1839 if (is_broadcast || from_unregistered)
1840 goto skip_processing;
1841 break;
1842
1843 case CEC_MSG_REPORT_PHYSICAL_ADDR:
		/*
		 * This message is always processed, regardless of the
		 * passthrough setting.
		 *
		 * Exception: don't process if wrong addressing mode.
		 */
1850 if (!is_broadcast)
1851 goto skip_processing;
1852 break;
1853
1854 default:
1855 break;
1856 }
1857
1858 cec_msg_set_reply_to(&tx_cec_msg, msg);
1859
1860 switch (msg->msg[1]) {
1861
1862 case CEC_MSG_REPORT_PHYSICAL_ADDR: {
1863 u16 pa = (msg->msg[2] << 8) | msg->msg[3];
1864
1865 if (!from_unregistered)
1866 adap->phys_addrs[init_laddr] = pa;
1867 dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n",
1868 cec_phys_addr_exp(pa), init_laddr);
1869 break;
1870 }
1871
1872 case CEC_MSG_USER_CONTROL_PRESSED:
1873 if (!(adap->capabilities & CEC_CAP_RC) ||
1874 !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
1875 break;
1876
1877#ifdef CONFIG_MEDIA_CEC_RC
1878 switch (msg->msg[2]) {
		/*
		 * Play function, this message can have variable length
		 * depending on the specific play function that is used.
		 */
1883 case 0x60:
1884 if (msg->len == 2)
1885 rc_keydown(adap->rc, RC_PROTO_CEC,
1886 msg->msg[2], 0);
1887 else
1888 rc_keydown(adap->rc, RC_PROTO_CEC,
1889 msg->msg[2] << 8 | msg->msg[3], 0);
1890 break;
		/*
		 * Other function messages that are not handled.
		 * Currently the RC framework does not allow supplying an
		 * additional parameter to a keypress. These "keys" contain
		 * other information such as channel number, an input number
		 * etc.
		 * For the time being these messages are not processed by the
		 * rc framework and are simply forwarded to user space.
		 */
1900 case 0x56: case 0x57:
1901 case 0x67: case 0x68: case 0x69: case 0x6a:
1902 break;
1903 default:
1904 rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
1905 break;
1906 }
1907#endif
1908 break;
1909
1910 case CEC_MSG_USER_CONTROL_RELEASED:
1911 if (!(adap->capabilities & CEC_CAP_RC) ||
1912 !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
1913 break;
1914#ifdef CONFIG_MEDIA_CEC_RC
1915 rc_keyup(adap->rc);
1916#endif
1917 break;
1918
1919
1920
1921
1922
1923 case CEC_MSG_GET_CEC_VERSION:
1924 cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
1925 return cec_transmit_msg(adap, &tx_cec_msg, false);
1926
1927 case CEC_MSG_GIVE_PHYSICAL_ADDR:
		/* Do nothing for CEC switches using addr 15 */
1929 if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
1930 return 0;
1931 cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
1932 return cec_transmit_msg(adap, &tx_cec_msg, false);
1933
1934 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1935 if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
1936 return cec_feature_abort(adap, msg);
1937 cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
1938 return cec_transmit_msg(adap, &tx_cec_msg, false);
1939
1940 case CEC_MSG_ABORT:
		/* Do nothing for CEC switches */
1942 if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
1943 return 0;
1944 return cec_feature_refused(adap, msg);
1945
1946 case CEC_MSG_GIVE_OSD_NAME: {
1947 if (adap->log_addrs.osd_name[0] == 0)
1948 return cec_feature_abort(adap, msg);
1949 cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
1950 return cec_transmit_msg(adap, &tx_cec_msg, false);
1951 }
1952
1953 case CEC_MSG_GIVE_FEATURES:
1954 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1955 return cec_feature_abort(adap, msg);
1956 cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
1957 return cec_transmit_msg(adap, &tx_cec_msg, false);
1958
1959 default:
		/*
		 * Unprocessed messages are aborted if userspace isn't doing
		 * any processing either.
		 */
1964 if (!is_broadcast && !is_reply && !adap->follower_cnt &&
1965 !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
1966 return cec_feature_abort(adap, msg);
1967 break;
1968 }
1969
1970skip_processing:
	/* If this was a reply, then we're done, unless otherwise specified */
1972 if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
1973 return 0;

	/*
	 * Send to the exclusive follower if there is one, otherwise send
	 * to all followers.
	 */
1979 if (adap->cec_follower)
1980 cec_queue_msg_fh(adap->cec_follower, msg);
1981 else
1982 cec_queue_msg_followers(adap, msg);
1983 return 0;
1984}
1985
/*
 * Helper functions to keep track of the 'monitor all' use count.
 *
 * These functions are called with adap->lock held.
 */
1991int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
1992{
1993 int ret = 0;
1994
1995 if (adap->monitor_all_cnt == 0)
1996 ret = call_op(adap, adap_monitor_all_enable, 1);
1997 if (ret == 0)
1998 adap->monitor_all_cnt++;
1999 return ret;
2000}
2001
2002void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
2003{
2004 adap->monitor_all_cnt--;
2005 if (adap->monitor_all_cnt == 0)
2006 WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
2007}
2008
/*
 * Helper functions to keep track of the 'monitor pin' use count.
 *
 * These functions are called with adap->lock held.
 */
2014int cec_monitor_pin_cnt_inc(struct cec_adapter *adap)
2015{
2016 int ret = 0;
2017
2018 if (adap->monitor_pin_cnt == 0)
2019 ret = call_op(adap, adap_monitor_pin_enable, 1);
2020 if (ret == 0)
2021 adap->monitor_pin_cnt++;
2022 return ret;
2023}
2024
2025void cec_monitor_pin_cnt_dec(struct cec_adapter *adap)
2026{
2027 adap->monitor_pin_cnt--;
2028 if (adap->monitor_pin_cnt == 0)
2029 WARN_ON(call_op(adap, adap_monitor_pin_enable, 0));
2030}
2031
2032#ifdef CONFIG_DEBUG_FS
/*
 * Log the current state of the CEC adapter.
 * Very useful for debugging.
 */
2037int cec_adap_status(struct seq_file *file, void *priv)
2038{
2039 struct cec_adapter *adap = dev_get_drvdata(file->private);
2040 struct cec_data *data;
2041
2042 mutex_lock(&adap->lock);
2043 seq_printf(file, "configured: %d\n", adap->is_configured);
2044 seq_printf(file, "configuring: %d\n", adap->is_configuring);
2045 seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
2046 cec_phys_addr_exp(adap->phys_addr));
2047 seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
2048 seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
2049 if (adap->cec_follower)
2050 seq_printf(file, "has CEC follower%s\n",
2051 adap->passthrough ? " (in passthrough mode)" : "");
2052 if (adap->cec_initiator)
2053 seq_puts(file, "has CEC initiator\n");
2054 if (adap->monitor_all_cnt)
2055 seq_printf(file, "file handles in Monitor All mode: %u\n",
2056 adap->monitor_all_cnt);
2057 if (adap->tx_timeouts) {
2058 seq_printf(file, "transmit timeouts: %u\n",
2059 adap->tx_timeouts);
2060 adap->tx_timeouts = 0;
2061 }
2062 data = adap->transmitting;
2063 if (data)
2064 seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
2065 data->msg.len, data->msg.msg, data->msg.reply,
2066 data->msg.timeout);
2067 seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
2068 list_for_each_entry(data, &adap->transmit_queue, list) {
2069 seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
2070 data->msg.len, data->msg.msg, data->msg.reply,
2071 data->msg.timeout);
2072 }
2073 list_for_each_entry(data, &adap->wait_queue, list) {
2074 seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
2075 data->msg.len, data->msg.msg, data->msg.reply,
2076 data->msg.timeout);
2077 }
2078
2079 call_void_op(adap, adap_status, file);
2080 mutex_unlock(&adap->lock);
2081 return 0;
2082}
2083#endif
2084