linux/drivers/iio/industrialio-trigger.c
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 **/
static ssize_t iio_trigger_read_name(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct iio_trigger *trig = to_iio_trigger(dev);
        return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
        &dev_attr_name.attr,
        NULL,
};

static struct attribute_group iio_trig_attr_group = {
        .attrs  = iio_trig_dev_attrs,
};

static const struct attribute_group *iio_trig_attr_groups[] = {
        &iio_trig_attr_group,
        NULL
};

int iio_trigger_register(struct iio_trigger *trig_info)
{
        int ret;

        trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
        if (trig_info->id < 0) {
                ret = trig_info->id;
                goto error_ret;
        }
        /* Set the name used for the sysfs directory etc */
        dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

        ret = device_add(&trig_info->dev);
        if (ret)
                goto error_unregister_id;

        /* Add to list of available triggers held by the IIO core */
        mutex_lock(&iio_trigger_list_lock);
        list_add_tail(&trig_info->list, &iio_trigger_list);
        mutex_unlock(&iio_trigger_list_lock);

        return 0;

error_unregister_id:
        ida_simple_remove(&iio_trigger_ida, trig_info->id);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
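/*
 * Example (illustrative sketch, not part of this file): a trigger provider
 * typically pairs iio_trigger_alloc()/iio_trigger_register() on probe with
 * iio_trigger_unregister()/iio_trigger_free() on remove.  The ops structure
 * my_trig_ops and the "my-dev" name below are hypothetical.
 *
 *	struct iio_trigger *trig;
 *	int ret;
 *
 *	trig = iio_trigger_alloc("%s-dev%d", "my-dev", idx);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trig_ops;
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 *
 * On remove the driver calls iio_trigger_unregister(trig) followed by
 * iio_trigger_free(trig) to drop its reference.
 */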

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
        mutex_lock(&iio_trigger_list_lock);
        list_del(&trig_info->list);
        mutex_unlock(&iio_trigger_list_lock);

        ida_simple_remove(&iio_trigger_ida, trig_info->id);
        /* Possible issue in here */
        device_unregister(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

static struct iio_trigger *iio_trigger_find_by_name(const char *name,
                                                    size_t len)
{
        struct iio_trigger *trig = NULL, *iter;

        mutex_lock(&iio_trigger_list_lock);
        list_for_each_entry(iter, &iio_trigger_list, list)
                if (sysfs_streq(iter->name, name)) {
                        trig = iter;
                        break;
                }
        mutex_unlock(&iio_trigger_list_lock);

        return trig;
}

void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
        int i;
        if (!trig->use_count)
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
                        if (trig->subirqs[i].enabled) {
                                trig->use_count++;
                                generic_handle_irq(trig->subirq_base + i);
                        }
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
        iio_trigger_poll(private, iio_get_time_ns());
        return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
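/*
 * Example (illustrative sketch): a driver with a hardware data-ready line
 * can route its interrupt straight to the trigger core by using this
 * handler; the handler calls iio_trigger_poll() with the current timestamp,
 * which fires the sub-irq of every enabled consumer.  The irq number, flags
 * and "my-dev-drdy" name below are hypothetical.
 *
 *	ret = request_irq(irq, iio_trigger_generic_data_rdy_poll,
 *			  IRQF_TRIGGER_RISING, "my-dev-drdy", trig);
 */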

void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
        int i;
        if (!trig->use_count)
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
                        if (trig->subirqs[i].enabled) {
                                trig->use_count++;
                                handle_nested_irq(trig->subirq_base + i);
                        }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
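/*
 * Example (illustrative sketch): iio_trigger_poll_chained() uses nested irq
 * handling, so it is intended for contexts that may sleep, typically the
 * threaded half of a handler registered with request_threaded_irq().  The
 * function name below is hypothetical.
 *
 *	static irqreturn_t my_trig_irq_thread(int irq, void *private)
 *	{
 *		iio_trigger_poll_chained(private, iio_get_time_ns());
 *		return IRQ_HANDLED;
 *	}
 */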

void iio_trigger_notify_done(struct iio_trigger *trig)
{
        trig->use_count--;
        if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
                if (trig->ops->try_reenable(trig))
                        /* Missed an interrupt so launch new poll now */
                        iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
        int ret;
        mutex_lock(&trig->pool_lock);
        ret = bitmap_find_free_region(trig->pool,
                                      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
                                      ilog2(1));
        mutex_unlock(&trig->pool_lock);
        if (ret >= 0)
                ret += trig->subirq_base;

        return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
        mutex_lock(&trig->pool_lock);
        clear_bit(irq - trig->subirq_base, trig->pool);
        mutex_unlock(&trig->pool_lock);
}

/* Complexity in here.  With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled.  An alternative, not enabling the trigger
 * unless the relevant poll function is attached, may be the best option.
 */
/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
                                        struct iio_poll_func *pf)
{
        int ret = 0;
        bool notinuse
                = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

        /* Prevent the module from being removed whilst attached to a trigger */
        __module_get(pf->indio_dev->info->driver_module);
        pf->irq = iio_trigger_get_irq(trig);
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
        if (ret < 0) {
                module_put(pf->indio_dev->info->driver_module);
                return ret;
        }

        if (trig->ops && trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
                        module_put(pf->indio_dev->info->driver_module);
        }

        return ret;
}

static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
                                        struct iio_poll_func *pf)
{
        int ret = 0;
        bool no_other_users
                = (bitmap_weight(trig->pool,
                                 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
                   == 1);
        if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
                ret = trig->ops->set_trigger_state(trig, false);
                if (ret)
                        goto error_ret;
        }
        iio_trigger_put_irq(trig, pf->irq);
        free_irq(pf->irq, pf);
        module_put(pf->indio_dev->info->driver_module);

error_ret:
        return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        pf->timestamp = iio_get_time_ns();
        return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
                    irqreturn_t (*thread)(int irq, void *p),
                    int type,
                    struct iio_dev *indio_dev,
                    const char *fmt,
                    ...)
{
        va_list vargs;
        struct iio_poll_func *pf;

        pf = kmalloc(sizeof *pf, GFP_KERNEL);
        if (pf == NULL)
                return NULL;
        va_start(vargs, fmt);
        pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
        va_end(vargs);
        if (pf->name == NULL) {
                kfree(pf);
                return NULL;
        }
        pf->h = h;
        pf->thread = thread;
        pf->type = type;
        pf->indio_dev = indio_dev;

        return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
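/*
 * Example (illustrative sketch): a trigger consumer normally allocates its
 * poll function once at setup time, with iio_pollfunc_store_time() as the
 * top half so the timestamp is captured in hard interrupt context, and a
 * threaded bottom half (my_trigger_handler below is hypothetical).
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 *						 &my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 indio_dev->id);
 *	if (indio_dev->pollfunc == NULL)
 *		return -ENOMEM;
 *
 * The matching teardown call is iio_dealloc_pollfunc(indio_dev->pollfunc).
 */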

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
        kfree(pf->name);
        kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 **/
static ssize_t iio_trigger_read_current(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        if (indio_dev->trig)
                return sprintf(buf, "%s\n", indio_dev->trig->name);
        return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 **/
static ssize_t iio_trigger_write_current(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_trigger *oldtrig = indio_dev->trig;
        struct iio_trigger *trig;
        int ret;

        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                mutex_unlock(&indio_dev->mlock);
                return -EBUSY;
        }
        mutex_unlock(&indio_dev->mlock);

        trig = iio_trigger_find_by_name(buf, len);
        if (oldtrig == trig)
                return len;

        if (trig && indio_dev->info->validate_trigger) {
                ret = indio_dev->info->validate_trigger(indio_dev, trig);
                if (ret)
                        return ret;
        }

        if (trig && trig->ops && trig->ops->validate_device) {
                ret = trig->ops->validate_device(trig, indio_dev);
                if (ret)
                        return ret;
        }

        indio_dev->trig = trig;

        if (oldtrig && indio_dev->trig != oldtrig)
                iio_trigger_put(oldtrig);
        if (indio_dev->trig)
                iio_trigger_get(indio_dev->trig);

        return len;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
                   iio_trigger_read_current,
                   iio_trigger_write_current);
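/*
 * Example (illustrative sketch): from userspace a consumer is bound to a
 * trigger by writing the trigger's name (as reported by the trigger's own
 * name attribute) into the consumer's current_trigger file, e.g.
 *
 *	echo sysfstrig0 > /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *
 * Writing a name that matches no registered trigger, or an empty string,
 * leaves the device without a trigger.
 */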

static struct attribute *iio_trigger_consumer_attrs[] = {
        &dev_attr_current_trigger.attr,
        NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
        .name = "trigger",
        .attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
        struct iio_trigger *trig = to_iio_trigger(device);
        int i;

        if (trig->subirq_base) {
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        irq_modify_status(trig->subirq_base + i,
                                          IRQ_NOAUTOEN,
                                          IRQ_NOREQUEST | IRQ_NOPROBE);
                        irq_set_chip(trig->subirq_base + i,
                                     NULL);
                        irq_set_handler(trig->subirq_base + i,
                                        NULL);
                }

                irq_free_descs(trig->subirq_base,
                               CONFIG_IIO_CONSUMERS_PER_TRIGGER);
        }
        kfree(trig->name);
        kfree(trig);
}

static struct device_type iio_trig_type = {
        .release = iio_trig_release,
        .groups = iio_trig_attr_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct iio_trigger *trig
                = container_of(chip,
                               struct iio_trigger, subirq_chip);
        trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct iio_trigger *trig
                = container_of(chip,
                               struct iio_trigger, subirq_chip);
        trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
        va_list vargs;
        struct iio_trigger *trig;
        trig = kzalloc(sizeof *trig, GFP_KERNEL);
        if (trig) {
                int i;
                trig->dev.type = &iio_trig_type;
                trig->dev.bus = &iio_bus_type;
                device_initialize(&trig->dev);

                mutex_init(&trig->pool_lock);
                trig->subirq_base
                        = irq_alloc_descs(-1, 0,
                                          CONFIG_IIO_CONSUMERS_PER_TRIGGER,
                                          0);
                if (trig->subirq_base < 0) {
                        kfree(trig);
                        return NULL;
                }
                va_start(vargs, fmt);
                trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
                va_end(vargs);
                if (trig->name == NULL) {
                        irq_free_descs(trig->subirq_base,
                                       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
                        kfree(trig);
                        return NULL;
                }
                trig->subirq_chip.name = trig->name;
                trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
                trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        irq_set_chip(trig->subirq_base + i,
                                     &trig->subirq_chip);
                        irq_set_handler(trig->subirq_base + i,
                                        &handle_simple_irq);
                        irq_modify_status(trig->subirq_base + i,
                                          IRQ_NOREQUEST | IRQ_NOAUTOEN,
                                          IRQ_NOPROBE);
                }
                get_device(&trig->dev);
        }
        return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
        if (trig)
                put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
        indio_dev->groups[indio_dev->groupcounter++] =
                &iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
        /* Clean up an associated but not attached trigger reference */
        if (indio_dev->trig)
                iio_trigger_put(indio_dev->trig);
}

int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
        return iio_trigger_attach_poll_func(indio_dev->trig,
                                            indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
        return iio_trigger_detach_poll_func(indio_dev->trig,
                                            indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
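/*
 * Example (illustrative sketch): these two helpers are normally plugged
 * straight into a driver's buffer setup ops so the poll function is attached
 * to the current trigger when the buffer is enabled and detached before it
 * is disabled.  my_buffer_setup_ops below is hypothetical.
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.postenable = &iio_triggered_buffer_postenable,
 *		.predisable = &iio_triggered_buffer_predisable,
 *	};
 *
 *	indio_dev->setup_ops = &my_buffer_setup_ops;
 */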