1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/export.h>
16#include <linux/kthread.h>
17#include <linux/mutex.h>
18#include <linux/kmod.h>
19#include <linux/sched.h>
20#include <linux/freezer.h>
21#include "rc-core-priv.h"
22
23
24#define MAX_IR_EVENT_SIZE 512
25
26
27static LIST_HEAD(ir_raw_client_list);
28
29
30static DEFINE_MUTEX(ir_raw_handler_lock);
31static LIST_HEAD(ir_raw_handler_list);
32static u64 available_protocols;
33
/*
 * Worker thread that drains the raw-sample kfifo and feeds each complete
 * event to every registered protocol decoder.  One instance runs per raw
 * rc device; it is woken by ir_raw_event_handle() and stopped by
 * ir_raw_event_unregister() via kthread_stop().
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_len(&raw->kfifo);

		if (retval < sizeof(ev)) {
			/*
			 * Less than one full event buffered: go to sleep.
			 * The state is set to TASK_INTERRUPTIBLE *before*
			 * re-checking kthread_should_stop() so a stop
			 * request racing with this check is not lost — if
			 * stop was already signalled we flip back to
			 * TASK_RUNNING and schedule() returns immediately.
			 * The producer side (ir_raw_event_handle) takes
			 * raw->lock around wake_up_process(), so the wakeup
			 * cannot slip between the len check and schedule().
			 */
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
		spin_unlock_irq(&raw->lock);

		/* Serialize against handler registration/unregistration */
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}
69
70
71
72
73
74
75
76
77
78
79
80int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
81{
82 if (!dev->raw)
83 return -EINVAL;
84
85 IR_dprintk(2, "sample: (%05dus %s)\n",
86 TO_US(ev->duration), TO_STR(ev->pulse));
87
88 if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
89 return -ENOMEM;
90
91 return 0;
92}
93EXPORT_SYMBOL_GPL(ir_raw_event_store);
94
95
96
97
98
99
100
101
102
103
104
105
106int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
107{
108 ktime_t now;
109 s64 delta;
110 DEFINE_IR_RAW_EVENT(ev);
111 int rc = 0;
112 int delay;
113
114 if (!dev->raw)
115 return -EINVAL;
116
117 now = ktime_get();
118 delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
119 delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
120
121
122
123
124
125 if (delta > delay || !dev->raw->last_type)
126 type |= IR_START_EVENT;
127 else
128 ev.duration = delta;
129
130 if (type & IR_START_EVENT)
131 ir_raw_event_reset(dev);
132 else if (dev->raw->last_type & IR_SPACE) {
133 ev.pulse = false;
134 rc = ir_raw_event_store(dev, &ev);
135 } else if (dev->raw->last_type & IR_PULSE) {
136 ev.pulse = true;
137 rc = ir_raw_event_store(dev, &ev);
138 } else
139 return 0;
140
141 dev->raw->last_event = now;
142 dev->raw->last_type = type;
143 return rc;
144}
145EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
146
147
148
149
150
151
152
153
154
155
156
157
158
159int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
160{
161 if (!dev->raw)
162 return -EINVAL;
163
164
165 if (dev->idle && !ev->pulse)
166 return 0;
167 else if (dev->idle)
168 ir_raw_event_set_idle(dev, false);
169
170 if (!dev->raw->this_ev.duration)
171 dev->raw->this_ev = *ev;
172 else if (ev->pulse == dev->raw->this_ev.pulse)
173 dev->raw->this_ev.duration += ev->duration;
174 else {
175 ir_raw_event_store(dev, &dev->raw->this_ev);
176 dev->raw->this_ev = *ev;
177 }
178
179
180 if (!ev->pulse && dev->timeout &&
181 dev->raw->this_ev.duration >= dev->timeout)
182 ir_raw_event_set_idle(dev, true);
183
184 return 1;
185}
186EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
187
188
189
190
191
192
193void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
194{
195 if (!dev->raw)
196 return;
197
198 IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
199
200 if (idle) {
201 dev->raw->this_ev.timeout = true;
202 ir_raw_event_store(dev, &dev->raw->this_ev);
203 init_ir_raw_event(&dev->raw->this_ev);
204 }
205
206 if (dev->s_idle)
207 dev->s_idle(dev, idle);
208
209 dev->idle = idle;
210}
211EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
212
213
214
215
216
217
218
219void ir_raw_event_handle(struct rc_dev *dev)
220{
221 unsigned long flags;
222
223 if (!dev->raw)
224 return;
225
226 spin_lock_irqsave(&dev->raw->lock, flags);
227 wake_up_process(dev->raw->thread);
228 spin_unlock_irqrestore(&dev->raw->lock, flags);
229}
230EXPORT_SYMBOL_GPL(ir_raw_event_handle);
231
232
233u64
234ir_raw_get_allowed_protocols(void)
235{
236 u64 protocols;
237 mutex_lock(&ir_raw_handler_lock);
238 protocols = available_protocols;
239 mutex_unlock(&ir_raw_handler_lock);
240 return protocols;
241}
242
/*
 * Protocol-change callback installed on every raw device.  Raw devices
 * hand every sample to all registered decoders, so there is no per-device
 * hardware protocol state to switch here; accept any mask unchanged.
 */
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{

	return 0;
}
248
249
250
251
252int ir_raw_event_register(struct rc_dev *dev)
253{
254 int rc;
255 struct ir_raw_handler *handler;
256
257 if (!dev)
258 return -EINVAL;
259
260 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
261 if (!dev->raw)
262 return -ENOMEM;
263
264 dev->raw->dev = dev;
265 dev->change_protocol = change_protocol;
266 rc = kfifo_alloc(&dev->raw->kfifo,
267 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
268 GFP_KERNEL);
269 if (rc < 0)
270 goto out;
271
272 spin_lock_init(&dev->raw->lock);
273 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
274 "rc%u", dev->minor);
275
276 if (IS_ERR(dev->raw->thread)) {
277 rc = PTR_ERR(dev->raw->thread);
278 goto out;
279 }
280
281 mutex_lock(&ir_raw_handler_lock);
282 list_add_tail(&dev->raw->list, &ir_raw_client_list);
283 list_for_each_entry(handler, &ir_raw_handler_list, list)
284 if (handler->raw_register)
285 handler->raw_register(dev);
286 mutex_unlock(&ir_raw_handler_lock);
287
288 return 0;
289
290out:
291 kfree(dev->raw);
292 dev->raw = NULL;
293 return rc;
294}
295
/*
 * ir_raw_event_unregister() - tear down raw IR decoding for a device.
 * @dev:	the struct rc_dev device descriptor
 *
 * The decoding thread is stopped first so no decoder can run against the
 * device while it is being unlinked; handlers then release any per-device
 * state before the fifo and the raw context itself are freed.
 */
void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	/* must precede list_del(): the thread dereferences dev->raw */
	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfifo_free(&dev->raw->kfifo);
	kfree(dev->raw);
	dev->raw = NULL;
}
316
317
318
319
320
321int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
322{
323 struct ir_raw_event_ctrl *raw;
324
325 mutex_lock(&ir_raw_handler_lock);
326 list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
327 if (ir_raw_handler->raw_register)
328 list_for_each_entry(raw, &ir_raw_client_list, list)
329 ir_raw_handler->raw_register(raw->dev);
330 available_protocols |= ir_raw_handler->protocols;
331 mutex_unlock(&ir_raw_handler_lock);
332
333 return 0;
334}
335EXPORT_SYMBOL(ir_raw_handler_register);
336
337void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
338{
339 struct ir_raw_event_ctrl *raw;
340
341 mutex_lock(&ir_raw_handler_lock);
342 list_del(&ir_raw_handler->list);
343 if (ir_raw_handler->raw_unregister)
344 list_for_each_entry(raw, &ir_raw_client_list, list)
345 ir_raw_handler->raw_unregister(raw->dev);
346 available_protocols &= ~ir_raw_handler->protocols;
347 mutex_unlock(&ir_raw_handler_lock);
348}
349EXPORT_SYMBOL(ir_raw_handler_unregister);
350
/*
 * ir_raw_init() - pull in the bundled protocol decoders at rc-core init.
 *
 * Each load_*() is provided by rc-core-priv.h; what it expands to depends
 * on the kernel configuration (NOTE: see rc-core-priv.h — built-in
 * decoders register directly, otherwise these may be no-ops or module
 * requests).
 */
void ir_raw_init(void)
{


	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_sanyo_decode();
	load_sharp_decode();
	load_mce_kbd_decode();
	load_lirc_codec();
	load_xmp_decode();




}
370