/* ir-raw-event handling for rc-core: raw IR pulse/space processing.
 *
 * NOTE(review): the original file header (description, copyright and
 * license lines) was garbled during extraction and only line-number
 * residue remains; restore the exact header from the upstream kernel
 * source tree rather than this placeholder.
 */
15#include <linux/kthread.h>
16#include <linux/mutex.h>
17#include <linux/sched.h>
18#include <linux/freezer.h>
19#include "rc-core-priv.h"
20
21
/* Number of raw IR samples the per-device kfifo can buffer */
#define MAX_IR_EVENT_SIZE 512

/* All registered raw clients (one entry per rc_dev with raw support) */
static LIST_HEAD(ir_raw_client_list);

/* Protects ir_raw_client_list, ir_raw_handler_list and available_protocols */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
/* Union of the protocol masks of all registered handlers */
static u64 available_protocols;

#ifdef MODULE
/* Work item used to defer decoder-module loading out of init context */
static struct work_struct wq_load;
#endif
36
/*
 * Kthread worker: drains IR samples from the device's kfifo and feeds
 * each one to every registered protocol decoder. One thread runs per
 * registered raw device (started in ir_raw_event_register()).
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));

		if (!retval) {
			/* Fifo empty: go to sleep until a producer wakes us.
			 * The state change happens under raw->lock, the same
			 * lock ir_raw_event_handle() takes around its
			 * wake_up_process(), so a wakeup cannot slip in
			 * between the emptiness check and schedule(). */
			set_current_state(TASK_INTERRUPTIBLE);

			/* Don't sleep if a stop request raced in */
			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		spin_unlock_irq(&raw->lock);

		/* kfifo_out() returns the number of bytes copied; anything
		 * other than a whole event would mean fifo corruption. */
		BUG_ON(retval != sizeof(ev));

		/* Hand the sample to every decoder; the handler lock also
		 * keeps the handler list stable while we walk it. */
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}
74
75
76
77
78
79
80
81
82
83
84
85int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
86{
87 if (!dev->raw)
88 return -EINVAL;
89
90 IR_dprintk(2, "sample: (%05dus %s)\n",
91 TO_US(ev->duration), TO_STR(ev->pulse));
92
93 if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
94 return -ENOMEM;
95
96 return 0;
97}
98EXPORT_SYMBOL_GPL(ir_raw_event_store);
99
100
101
102
103
104
105
106
107
108
109
110
/**
 * ir_raw_event_store_edge() - store a pulse/space from an edge notification
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of edge event that occurred
 *
 * For hardware that reports only level transitions: the duration of the
 * level that just ended is computed from the time elapsed since the last
 * recorded edge. Returns 0 on success or a negative error code.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t now;
	s64 delta;
	DEFINE_IR_RAW_EVENT(ev);
	int rc = 0;
	int delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* If more time passed than the key-repeat delay, or no edge has
	 * been seen yet (!last_type), treat this edge as the start of a
	 * new message instead of a continuation of the previous level. */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		/* this edge terminates a space of length 'delta' */
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		/* this edge terminates a pulse of length 'delta' */
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	/* Remember this edge only after it has been processed, so the next
	 * call measures its duration from here. */
	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
150EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
151
152
153
154
155
156
157
158
159
160
161
/**
 * ir_raw_event_store_with_filter() - store a sample, merging consecutive
 *				      samples of the same polarity
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the sample to (possibly) store
 *
 * Accumulates same-polarity samples into one event (this_ev) and only
 * pushes the accumulated event to the fifo when the polarity flips.
 * Also manages hardware idle mode: spaces arriving while idle are
 * dropped, a pulse leaves idle, and a space longer than dev->timeout
 * enters idle. Returns 0 on success, -EINVAL without raw support.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* While idle, ignore spaces; a pulse wakes the receiver up */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		/* nothing accumulated yet: start with this sample */
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		/* same polarity: extend the accumulated sample */
		dev->raw->this_ev.duration += ev->duration;
	else {
		/* polarity changed: flush what we had, start anew */
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* A sufficiently long space means the receiver has gone quiet */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
189EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
190
191
192
193
194
195
/**
 * ir_raw_event_set_idle() - enter or leave hardware idle mode
 * @dev:	the struct rc_dev device descriptor
 * @idle:	true to enter idle mode, false to leave it
 *
 * On entering idle, the accumulated sample (this_ev) is flushed to the
 * fifo flagged as a timeout, then reset. The driver's s_idle callback,
 * if any, is invoked before the idle flag is updated.
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		/* push out the pending sample, marked as a timeout */
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	/* let the driver reconfigure its hardware, if it cares */
	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
214EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
215
216
217
218
219
220
221
222void ir_raw_event_handle(struct rc_dev *dev)
223{
224 unsigned long flags;
225
226 if (!dev->raw)
227 return;
228
229 spin_lock_irqsave(&dev->raw->lock, flags);
230 wake_up_process(dev->raw->thread);
231 spin_unlock_irqrestore(&dev->raw->lock, flags);
232}
233EXPORT_SYMBOL_GPL(ir_raw_event_handle);
234
235
236u64
237ir_raw_get_allowed_protocols(void)
238{
239 u64 protocols;
240 mutex_lock(&ir_raw_handler_lock);
241 protocols = available_protocols;
242 mutex_unlock(&ir_raw_handler_lock);
243 return protocols;
244}
245
246
247
248
249int ir_raw_event_register(struct rc_dev *dev)
250{
251 int rc;
252 struct ir_raw_handler *handler;
253
254 if (!dev)
255 return -EINVAL;
256
257 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
258 if (!dev->raw)
259 return -ENOMEM;
260
261 dev->raw->dev = dev;
262 dev->raw->enabled_protocols = ~0;
263 rc = kfifo_alloc(&dev->raw->kfifo,
264 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
265 GFP_KERNEL);
266 if (rc < 0)
267 goto out;
268
269 spin_lock_init(&dev->raw->lock);
270 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
271 "rc%ld", dev->devno);
272
273 if (IS_ERR(dev->raw->thread)) {
274 rc = PTR_ERR(dev->raw->thread);
275 goto out;
276 }
277
278 mutex_lock(&ir_raw_handler_lock);
279 list_add_tail(&dev->raw->list, &ir_raw_client_list);
280 list_for_each_entry(handler, &ir_raw_handler_list, list)
281 if (handler->raw_register)
282 handler->raw_register(dev);
283 mutex_unlock(&ir_raw_handler_lock);
284
285 return 0;
286
287out:
288 kfree(dev->raw);
289 dev->raw = NULL;
290 return rc;
291}
292
293void ir_raw_event_unregister(struct rc_dev *dev)
294{
295 struct ir_raw_handler *handler;
296
297 if (!dev || !dev->raw)
298 return;
299
300 kthread_stop(dev->raw->thread);
301
302 mutex_lock(&ir_raw_handler_lock);
303 list_del(&dev->raw->list);
304 list_for_each_entry(handler, &ir_raw_handler_list, list)
305 if (handler->raw_unregister)
306 handler->raw_unregister(dev);
307 mutex_unlock(&ir_raw_handler_lock);
308
309 kfifo_free(&dev->raw->kfifo);
310 kfree(dev->raw);
311 dev->raw = NULL;
312}
313
314
315
316
317
318int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
319{
320 struct ir_raw_event_ctrl *raw;
321
322 mutex_lock(&ir_raw_handler_lock);
323 list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
324 if (ir_raw_handler->raw_register)
325 list_for_each_entry(raw, &ir_raw_client_list, list)
326 ir_raw_handler->raw_register(raw->dev);
327 available_protocols |= ir_raw_handler->protocols;
328 mutex_unlock(&ir_raw_handler_lock);
329
330 return 0;
331}
332EXPORT_SYMBOL(ir_raw_handler_register);
333
334void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
335{
336 struct ir_raw_event_ctrl *raw;
337
338 mutex_lock(&ir_raw_handler_lock);
339 list_del(&ir_raw_handler->list);
340 if (ir_raw_handler->raw_unregister)
341 list_for_each_entry(raw, &ir_raw_client_list, list)
342 ir_raw_handler->raw_unregister(raw->dev);
343 available_protocols &= ~ir_raw_handler->protocols;
344 mutex_unlock(&ir_raw_handler_lock);
345}
346EXPORT_SYMBOL(ir_raw_handler_unregister);
347
348#ifdef MODULE
/*
 * Deferred-work callback (see ir_raw_init): pulls in the protocol
 * decoder modules. Only needed when rc-core is built as a module;
 * the load_* helpers are declared in rc-core-priv.h.
 */
static void init_decoders(struct work_struct *work)
{
	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_lirc_codec();

	/* NOTE(review): presumably new decoders must be added here as they
	 * are written — confirm against rc-core-priv.h. */
}
364#endif
365
/*
 * ir_raw_init() - one-time raw-IR subsystem initialization.
 *
 * When built as a module, decoder loading is deferred to a workqueue
 * (init_decoders) instead of being done inline here. Built-in decoders
 * register themselves, so nothing is needed in the non-module case.
 */
void ir_raw_init(void)
{
#ifdef MODULE
	INIT_WORK(&wq_load, init_decoders);
	schedule_work(&wq_load);
#endif
}
373