1
2
3
4
5
6#include <linux/export.h>
7#include <linux/kthread.h>
8#include <linux/mutex.h>
9#include <linux/kmod.h>
10#include <linux/sched.h>
11#include "rc-core-priv.h"
12
13
14static LIST_HEAD(ir_raw_client_list);
15
16
17DEFINE_MUTEX(ir_raw_handler_lock);
18static LIST_HEAD(ir_raw_handler_list);
19static atomic64_t available_protocols = ATOMIC64_INIT(0);
20
/*
 * Kernel thread that drains the raw-event kfifo for one rc_dev and feeds
 * every event to each registered protocol decoder (and to the lirc layer).
 * Woken by ir_raw_event_handle(); exits when kthread_stop() is called.
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		/* Handler list must not change while decoders run. */
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				/* Sanity-check the stream: zero-length and
				 * non-alternating pulse/space events indicate
				 * a buggy driver; warn only once. */
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
			}
			/* A handler with protocols == 0 decodes everything. */
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(dev, ev);
			lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		/* Mark ourselves sleeping BEFORE re-checking the stop flag
		 * and the fifo, so a wakeup arriving in between is not lost
		 * (classic prepare-to-sleep pattern). */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			/* More data raced in: cancel the sleep. */
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}
61
62
63
64
65
66
67
68
69
70
71
72int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
73{
74 if (!dev->raw)
75 return -EINVAL;
76
77 dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
78 ev->duration, TO_STR(ev->pulse));
79
80 if (!kfifo_put(&dev->raw->kfifo, *ev)) {
81 dev_err(&dev->dev, "IR event FIFO is full!\n");
82 return -ENOSPC;
83 }
84
85 return 0;
86}
87EXPORT_SYMBOL_GPL(ir_raw_event_store);
88
89
90
91
92
93
94
95
96
97
98
99
100int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
101{
102 ktime_t now;
103 struct ir_raw_event ev = {};
104
105 if (!dev->raw)
106 return -EINVAL;
107
108 now = ktime_get();
109 ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
110 ev.pulse = !pulse;
111
112 return ir_raw_event_store_with_timeout(dev, &ev);
113}
114EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
115
116
117
118
119
120
121
122
123
124
125
126
127int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
128{
129 ktime_t now;
130 int rc = 0;
131
132 if (!dev->raw)
133 return -EINVAL;
134
135 now = ktime_get();
136
137 spin_lock(&dev->raw->edge_spinlock);
138 rc = ir_raw_event_store(dev, ev);
139
140 dev->raw->last_event = now;
141
142
143 if (!timer_pending(&dev->raw->edge_handle) ||
144 time_after(dev->raw->edge_handle.expires,
145 jiffies + msecs_to_jiffies(15))) {
146 mod_timer(&dev->raw->edge_handle,
147 jiffies + msecs_to_jiffies(15));
148 }
149 spin_unlock(&dev->raw->edge_spinlock);
150
151 return rc;
152}
153EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);
154
155
156
157
158
159
160
161
162
163
164
165
166
167int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
168{
169 if (!dev->raw)
170 return -EINVAL;
171
172
173 if (dev->idle && !ev->pulse)
174 return 0;
175 else if (dev->idle)
176 ir_raw_event_set_idle(dev, false);
177
178 if (!dev->raw->this_ev.duration)
179 dev->raw->this_ev = *ev;
180 else if (ev->pulse == dev->raw->this_ev.pulse)
181 dev->raw->this_ev.duration += ev->duration;
182 else {
183 ir_raw_event_store(dev, &dev->raw->this_ev);
184 dev->raw->this_ev = *ev;
185 }
186
187
188 if (!ev->pulse && dev->timeout &&
189 dev->raw->this_ev.duration >= dev->timeout)
190 ir_raw_event_set_idle(dev, true);
191
192 return 1;
193}
194EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
195
196
197
198
199
200
201void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
202{
203 if (!dev->raw)
204 return;
205
206 dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");
207
208 if (idle) {
209 dev->raw->this_ev.timeout = true;
210 ir_raw_event_store(dev, &dev->raw->this_ev);
211 dev->raw->this_ev = (struct ir_raw_event) {};
212 }
213
214 if (dev->s_idle)
215 dev->s_idle(dev, idle);
216
217 dev->idle = idle;
218}
219EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
220
221
222
223
224
225
226
227void ir_raw_event_handle(struct rc_dev *dev)
228{
229 if (!dev->raw || !dev->raw->thread)
230 return;
231
232 wake_up_process(dev->raw->thread);
233}
234EXPORT_SYMBOL_GPL(ir_raw_event_handle);
235
236
237u64
238ir_raw_get_allowed_protocols(void)
239{
240 return atomic64_read(&available_protocols);
241}
242
243static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
244{
245 struct ir_raw_handler *handler;
246 u32 timeout = 0;
247
248 mutex_lock(&ir_raw_handler_lock);
249 list_for_each_entry(handler, &ir_raw_handler_list, list) {
250 if (!(dev->enabled_protocols & handler->protocols) &&
251 (*rc_proto & handler->protocols) && handler->raw_register)
252 handler->raw_register(dev);
253
254 if ((dev->enabled_protocols & handler->protocols) &&
255 !(*rc_proto & handler->protocols) &&
256 handler->raw_unregister)
257 handler->raw_unregister(dev);
258 }
259 mutex_unlock(&ir_raw_handler_lock);
260
261 if (!dev->max_timeout)
262 return 0;
263
264 mutex_lock(&ir_raw_handler_lock);
265 list_for_each_entry(handler, &ir_raw_handler_list, list) {
266 if (handler->protocols & *rc_proto) {
267 if (timeout < handler->min_timeout)
268 timeout = handler->min_timeout;
269 }
270 }
271 mutex_unlock(&ir_raw_handler_lock);
272
273 if (timeout == 0)
274 timeout = IR_DEFAULT_TIMEOUT;
275 else
276 timeout += MS_TO_US(10);
277
278 if (timeout < dev->min_timeout)
279 timeout = dev->min_timeout;
280 else if (timeout > dev->max_timeout)
281 timeout = dev->max_timeout;
282
283 if (dev->s_timeout)
284 dev->s_timeout(dev, timeout);
285 else
286 dev->timeout = timeout;
287
288 return 0;
289}
290
/* Clear @protocols from the device's enabled set, under the device lock. */
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
/**
 * ir_raw_gen_manchester() - produce raw events for a Manchester-coded value
 * @ev:		pointer to pointer to next free event; *ev is advanced past
 *		the last event written on return
 * @max:	maximum number of raw events this function may write
 * @timings:	clock period, leader timings and polarity for the encoding
 * @n:		number of bits of @data to encode (MSB first)
 * @data:	data bits to encode
 *
 * Returns 0 on success, -ENOBUFS if @max events were not enough. Note the
 * delicate pointer protocol: events are coalesced into the CURRENT *ev when
 * polarity matches, so *ev is pre-decremented when there is no leader (the
 * first ++(*ev) then lands on the caller's first slot) and post-incremented
 * once at the end so it points past the last event written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	/* Mask selecting the MSB of the n-bit payload. */
	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* No leader: step back so the loop's ++(*ev) writes the
		 * first half-bit into the caller's first slot. */
		--(*ev);
	}

	/* Each bit is two half-bit periods of opposite polarity. */
	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			/* Same polarity as previous half-bit: merge. */
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		/* Second half-bit always has the opposite polarity. */
		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			/* Last event is already a space: extend it. */
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* Leave *ev pointing one past the last event written. */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
399 const struct ir_raw_timings_pd *timings,
400 unsigned int n, u64 data)
401{
402 int i;
403 int ret;
404 unsigned int space;
405
406 if (timings->header_pulse) {
407 ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
408 timings->header_space);
409 if (ret)
410 return ret;
411 }
412
413 if (timings->msb_first) {
414 for (i = n - 1; i >= 0; --i) {
415 space = timings->bit_space[(data >> i) & 1];
416 ret = ir_raw_gen_pulse_space(ev, &max,
417 timings->bit_pulse,
418 space);
419 if (ret)
420 return ret;
421 }
422 } else {
423 for (i = 0; i < n; ++i, data >>= 1) {
424 space = timings->bit_space[data & 1];
425 ret = ir_raw_gen_pulse_space(ev, &max,
426 timings->bit_pulse,
427 space);
428 if (ret)
429 return ret;
430 }
431 }
432
433 ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
434 timings->trailer_space);
435 return ret;
436}
437EXPORT_SYMBOL(ir_raw_gen_pd);
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
458 const struct ir_raw_timings_pl *timings,
459 unsigned int n, u64 data)
460{
461 int i;
462 int ret = -ENOBUFS;
463 unsigned int pulse;
464
465 if (!max--)
466 return ret;
467
468 init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);
469
470 if (timings->msb_first) {
471 for (i = n - 1; i >= 0; --i) {
472 if (!max--)
473 return ret;
474 init_ir_raw_event_duration((*ev)++, 0,
475 timings->bit_space);
476 if (!max--)
477 return ret;
478 pulse = timings->bit_pulse[(data >> i) & 1];
479 init_ir_raw_event_duration((*ev)++, 1, pulse);
480 }
481 } else {
482 for (i = 0; i < n; ++i, data >>= 1) {
483 if (!max--)
484 return ret;
485 init_ir_raw_event_duration((*ev)++, 0,
486 timings->bit_space);
487 if (!max--)
488 return ret;
489 pulse = timings->bit_pulse[data & 1];
490 init_ir_raw_event_duration((*ev)++, 1, pulse);
491 }
492 }
493
494 if (!max--)
495 return ret;
496
497 init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);
498
499 return 0;
500}
501EXPORT_SYMBOL(ir_raw_gen_pl);
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
520 struct ir_raw_event *events, unsigned int max)
521{
522 struct ir_raw_handler *handler;
523 int ret = -EINVAL;
524 u64 mask = 1ULL << protocol;
525
526 ir_raw_load_modules(&mask);
527
528 mutex_lock(&ir_raw_handler_lock);
529 list_for_each_entry(handler, &ir_raw_handler_list, list) {
530 if (handler->protocols & mask && handler->encode) {
531 ret = handler->encode(protocol, scancode, events, max);
532 if (ret >= 0 || ret == -ENOBUFS)
533 break;
534 }
535 }
536 mutex_unlock(&ir_raw_handler_lock);
537
538 return ret;
539}
540EXPORT_SYMBOL(ir_raw_encode_scancode);
541
542
543
544
545
546
547
548
549
550
551
552
/*
 * Timer callback armed by ir_raw_event_store_with_timeout(): if the device
 * timeout has elapsed since the last edge, inject a synthetic timeout event
 * so decoders can finish the frame; otherwise re-arm the timer for the
 * remaining interval. Runs in timer (softirq) context, hence the irqsave
 * variant of the edge spinlock.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_us(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_us(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		/* Timeout not reached yet: retry when it would be. */
		mod_timer(&dev->raw->edge_handle,
			  jiffies + usecs_to_jiffies(dev->timeout -
						     ktime_to_us(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	/* Wake the decoder thread for whatever was queued. */
	ir_raw_event_handle(dev);
}
578
579
580
581
582
583
584
585
586
587
588
589
590int ir_raw_encode_carrier(enum rc_proto protocol)
591{
592 struct ir_raw_handler *handler;
593 int ret = -EINVAL;
594 u64 mask = BIT_ULL(protocol);
595
596 mutex_lock(&ir_raw_handler_lock);
597 list_for_each_entry(handler, &ir_raw_handler_list, list) {
598 if (handler->protocols & mask && handler->encode) {
599 ret = handler->carrier;
600 break;
601 }
602 }
603 mutex_unlock(&ir_raw_handler_lock);
604
605 return ret;
606}
607EXPORT_SYMBOL(ir_raw_encode_carrier);
608
609
610
611
612int ir_raw_event_prepare(struct rc_dev *dev)
613{
614 if (!dev)
615 return -EINVAL;
616
617 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
618 if (!dev->raw)
619 return -ENOMEM;
620
621 dev->raw->dev = dev;
622 dev->change_protocol = change_protocol;
623 dev->idle = true;
624 spin_lock_init(&dev->raw->edge_spinlock);
625 timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
626 INIT_KFIFO(dev->raw->kfifo);
627
628 return 0;
629}
630
631int ir_raw_event_register(struct rc_dev *dev)
632{
633 struct task_struct *thread;
634
635 thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
636 if (IS_ERR(thread))
637 return PTR_ERR(thread);
638
639 dev->raw->thread = thread;
640
641 mutex_lock(&ir_raw_handler_lock);
642 list_add_tail(&dev->raw->list, &ir_raw_client_list);
643 mutex_unlock(&ir_raw_handler_lock);
644
645 return 0;
646}
647
648void ir_raw_event_free(struct rc_dev *dev)
649{
650 if (!dev)
651 return;
652
653 kfree(dev->raw);
654 dev->raw = NULL;
655}
656
/*
 * Tear down the raw side of a device. Ordering matters: stop the decoder
 * thread first (so nothing drains the fifo any more), then quiesce the
 * edge timer, then — all under the handler lock — drop the device from the
 * client list, give each enabled handler a chance to free per-device state,
 * release bpf programs and free the context.
 */
void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/* NOTE(review): freeing under ir_raw_handler_lock appears deliberate —
	 * presumably so concurrent handler (un)registration cannot observe a
	 * half-torn-down device; confirm against lirc_bpf usage before
	 * narrowing the critical section. */
	mutex_unlock(&ir_raw_handler_lock);
}
685
686
687
688
689
690int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
691{
692 mutex_lock(&ir_raw_handler_lock);
693 list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
694 atomic64_or(ir_raw_handler->protocols, &available_protocols);
695 mutex_unlock(&ir_raw_handler_lock);
696
697 return 0;
698}
699EXPORT_SYMBOL(ir_raw_handler_register);
700
701void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
702{
703 struct ir_raw_event_ctrl *raw;
704 u64 protocols = ir_raw_handler->protocols;
705
706 mutex_lock(&ir_raw_handler_lock);
707 list_del(&ir_raw_handler->list);
708 list_for_each_entry(raw, &ir_raw_client_list, list) {
709 if (ir_raw_handler->raw_unregister &&
710 (raw->dev->enabled_protocols & protocols))
711 ir_raw_handler->raw_unregister(raw->dev);
712 ir_raw_disable_protocols(raw->dev, protocols);
713 }
714 atomic64_andnot(protocols, &available_protocols);
715 mutex_unlock(&ir_raw_handler_lock);
716}
717EXPORT_SYMBOL(ir_raw_handler_unregister);
718