linux/drivers/media/rc/rc-ir-raw.c
/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static DEFINE_MUTEX(available_protocols_lock);
static u64 available_protocols;

static int ir_raw_event_thread(void *data)
{
        struct ir_raw_event ev;
        struct ir_raw_handler *handler;
        struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

        while (!kthread_should_stop()) {

                spin_lock_irq(&raw->lock);

                if (!kfifo_len(&raw->kfifo)) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop())
                                set_current_state(TASK_RUNNING);

                        spin_unlock_irq(&raw->lock);
                        schedule();
                        continue;
                }

                if (!kfifo_out(&raw->kfifo, &ev, 1))
                        dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
                spin_unlock_irq(&raw->lock);

                mutex_lock(&ir_raw_handler_lock);
                list_for_each_entry(handler, &ir_raw_handler_list, list)
                        if (raw->dev->enabled_protocols & handler->protocols ||
                            !handler->protocols)
                                handler->decode(raw->dev, ev);
                raw->prev_ev = ev;
                mutex_unlock(&ir_raw_handler_lock);
        }

        return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:        the struct rc_dev device descriptor
 * @ev:         the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
        if (!dev->raw)
                return -EINVAL;

        IR_dprintk(2, "sample: (%05dus %s)\n",
                   TO_US(ev->duration), TO_STR(ev->pulse));

        if (!kfifo_put(&dev->raw->kfifo, *ev)) {
                dev_err(&dev->dev, "IR event FIFO is full!\n");
                return -ENOSPC;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
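
/*
 * A minimal usage sketch, assuming a hypothetical driver whose hardware
 * reports complete pulse/space durations; foo_report_sample() and the
 * nanosecond sample format are illustrative assumptions, not part of
 * rc-core:
 *
 *      static void foo_report_sample(struct rc_dev *rcdev, bool pulse, u32 ns)
 *      {
 *              DEFINE_IR_RAW_EVENT(ev);
 *
 *              ev.pulse = pulse;
 *              ev.duration = ns;
 *              ir_raw_event_store(rcdev, &ev);
 *      }
 *
 * A driver would follow a burst of such calls with ir_raw_event_handle()
 * so that the decoding thread is woken up.
 */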

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:        the struct rc_dev device descriptor
 * @type:       the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
        ktime_t                 now;
        s64                     delta; /* ns */
        DEFINE_IR_RAW_EVENT(ev);
        int                     rc = 0;
        int                     delay;

        if (!dev->raw)
                return -EINVAL;

        now = ktime_get();
        delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
        delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

        /* Check for a long duration since the last event or if we're
         * being called for the first time; note that delta can't
         * possibly be negative.
         */
        if (delta > delay || !dev->raw->last_type)
                type |= IR_START_EVENT;
        else
                ev.duration = delta;

        if (type & IR_START_EVENT)
                ir_raw_event_reset(dev);
        else if (dev->raw->last_type & IR_SPACE) {
                ev.pulse = false;
                rc = ir_raw_event_store(dev, &ev);
        } else if (dev->raw->last_type & IR_PULSE) {
                ev.pulse = true;
                rc = ir_raw_event_store(dev, &ev);
        } else
                return 0;

        dev->raw->last_event = now;
        dev->raw->last_type = type;
        return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
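
/*
 * A minimal sketch of how an edge-triggered receiver might use
 * ir_raw_event_store_edge(), assuming a hypothetical foo driver where
 * foo_line_active() returns the input level seen after the interrupting
 * edge (both foo_* names are illustrative, not part of rc-core):
 *
 *      static irqreturn_t foo_edge_isr(int irq, void *dev_id)
 *      {
 *              struct rc_dev *rcdev = dev_id;
 *              enum raw_event_type type;
 *
 *              type = foo_line_active() ? IR_PULSE : IR_SPACE;
 *              ir_raw_event_store_edge(rcdev, type);
 *              ir_raw_event_handle(rcdev);
 *
 *              return IRQ_HANDLED;
 *      }
 *
 * The duration of the interval that just ended is derived internally from
 * the time elapsed since the previous edge.
 */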

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:        the struct rc_dev device descriptor
 * @ev:         the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with a limited internal buffer.
 * It automerges samples of the same type and handles timeouts. Returns
 * non-zero if the event was added, and zero if the event was ignored due
 * to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
        if (!dev->raw)
                return -EINVAL;

        /* Ignore spaces in idle mode */
        if (dev->idle && !ev->pulse)
                return 0;
        else if (dev->idle)
                ir_raw_event_set_idle(dev, false);

        if (!dev->raw->this_ev.duration)
                dev->raw->this_ev = *ev;
        else if (ev->pulse == dev->raw->this_ev.pulse)
                dev->raw->this_ev.duration += ev->duration;
        else {
                ir_raw_event_store(dev, &dev->raw->this_ev);
                dev->raw->this_ev = *ev;
        }

        /* Enter idle mode if necessary */
        if (!ev->pulse && dev->timeout &&
            dev->raw->this_ev.duration >= dev->timeout)
                ir_raw_event_set_idle(dev, true);

        return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
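
/*
 * A minimal sketch, assuming a hypothetical driver that drains a hardware
 * sample FIFO where each byte encodes pulse/space in bit 7 and a duration
 * in units of FOO_SAMPLE_NS (the foo_* names and the sample format are
 * assumptions for illustration only):
 *
 *      static void foo_drain_fifo(struct rc_dev *rcdev, const u8 *buf, int len)
 *      {
 *              DEFINE_IR_RAW_EVENT(ev);
 *              int i;
 *
 *              for (i = 0; i < len; i++) {
 *                      ev.pulse = buf[i] & 0x80;
 *                      ev.duration = (buf[i] & 0x7f) * FOO_SAMPLE_NS;
 *                      ir_raw_event_store_with_filter(rcdev, &ev);
 *              }
 *              ir_raw_event_handle(rcdev);
 *      }
 *
 * With dev->timeout set, a sufficiently long space makes this helper call
 * ir_raw_event_set_idle() on the driver's behalf.
 */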

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:        the struct rc_dev device descriptor
 * @idle:       whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
        if (!dev->raw)
                return;

        IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

        if (idle) {
                dev->raw->this_ev.timeout = true;
                ir_raw_event_store(dev, &dev->raw->this_ev);
                init_ir_raw_event(&dev->raw->this_ev);
        }

        if (dev->s_idle)
                dev->s_idle(dev, idle);

        dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
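
/*
 * A short sketch of the usual calling pattern, assuming a hypothetical
 * driver whose hardware raises a separate end-of-reception interrupt:
 *
 *      ir_raw_event_set_idle(rcdev, true);
 *      ir_raw_event_handle(rcdev);
 *
 * Drivers that can stop their sampling engine while idle implement the
 * optional dev->s_idle() callback, which is invoked above.
 */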

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:        the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
        unsigned long flags;

        if (!dev->raw)
                return;

        spin_lock_irqsave(&dev->raw->lock, flags);
        wake_up_process(dev->raw->thread);
        spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
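
/*
 * This only wakes the per-device decoding thread, so it is safe to call
 * from interrupt context. A driver typically queues a whole burst of
 * samples with one of the store helpers above and then calls
 * ir_raw_event_handle() once, for example (sketch, foo_* names are
 * illustrative):
 *
 *      struct ir_raw_event ev;
 *
 *      while (foo_next_sample(priv, &ev))
 *              ir_raw_event_store(rcdev, &ev);
 *      ir_raw_event_handle(rcdev);
 */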

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
        u64 protocols;
        mutex_lock(&available_protocols_lock);
        protocols = available_protocols;
        mutex_unlock(&available_protocols_lock);
        return protocols;
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
        /* the caller will update dev->enabled_protocols */
        return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
        mutex_lock(&dev->lock);
        dev->enabled_protocols &= ~protocols;
        dev->enabled_wakeup_protocols &= ~protocols;
        mutex_unlock(&dev->lock);
}

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
        int rc;
        struct ir_raw_handler *handler;

        if (!dev)
                return -EINVAL;

        dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
        if (!dev->raw)
                return -ENOMEM;

        dev->raw->dev = dev;
        dev->change_protocol = change_protocol;
        INIT_KFIFO(dev->raw->kfifo);

        spin_lock_init(&dev->raw->lock);
        dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
                                       "rc%u", dev->minor);

        if (IS_ERR(dev->raw->thread)) {
                rc = PTR_ERR(dev->raw->thread);
                goto out;
        }

        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&dev->raw->list, &ir_raw_client_list);
        list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (handler->raw_register)
                        handler->raw_register(dev);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;

out:
        kfree(dev->raw);
        dev->raw = NULL;
        return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
        struct ir_raw_handler *handler;

        if (!dev || !dev->raw)
                return;

        kthread_stop(dev->raw->thread);

        mutex_lock(&ir_raw_handler_lock);
        list_del(&dev->raw->list);
        list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (handler->raw_unregister)
                        handler->raw_unregister(dev);
        mutex_unlock(&ir_raw_handler_lock);

        kfree(dev->raw);
        dev->raw = NULL;
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
        struct ir_raw_event_ctrl *raw;

        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
        if (ir_raw_handler->raw_register)
                list_for_each_entry(raw, &ir_raw_client_list, list)
                        ir_raw_handler->raw_register(raw->dev);
        mutex_lock(&available_protocols_lock);
        available_protocols |= ir_raw_handler->protocols;
        mutex_unlock(&available_protocols_lock);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
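
/*
 * A minimal sketch of a protocol decoder module using this interface; the
 * foo_* names are illustrative and RC_BIT_OTHER merely stands in for the
 * decoder's real protocol mask:
 *
 *      static int foo_decode(struct rc_dev *dev, struct ir_raw_event ev)
 *      {
 *              // run the state machine on one pulse/space and call
 *              // rc_keydown() once a full scancode has been seen
 *              return 0;
 *      }
 *
 *      static struct ir_raw_handler foo_handler = {
 *              .protocols      = RC_BIT_OTHER,
 *              .decode         = foo_decode,
 *      };
 *
 *      static int __init foo_decoder_init(void)
 *      {
 *              return ir_raw_handler_register(&foo_handler);
 *      }
 *
 *      static void __exit foo_decoder_exit(void)
 *      {
 *              ir_raw_handler_unregister(&foo_handler);
 *      }
 *
 *      module_init(foo_decoder_init);
 *      module_exit(foo_decoder_exit);
 */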

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
        struct ir_raw_event_ctrl *raw;
        u64 protocols = ir_raw_handler->protocols;

        mutex_lock(&ir_raw_handler_lock);
        list_del(&ir_raw_handler->list);
        list_for_each_entry(raw, &ir_raw_client_list, list) {
                ir_raw_disable_protocols(raw->dev, protocols);
                if (ir_raw_handler->raw_unregister)
                        ir_raw_handler->raw_unregister(raw->dev);
        }
        mutex_lock(&available_protocols_lock);
        available_protocols &= ~protocols;
        mutex_unlock(&available_protocols_lock);
        mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
