/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger (not implemented).
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 * @dev:        device associated with the iio_trigger
 * @attr:       pointer to the device_attribute structure that is
 *              being processed
 * @buf:        buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *         characters on success.
 */
static ssize_t iio_trigger_read_name(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct iio_trigger *trig = to_iio_trigger(dev);
        return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
        &dev_attr_name.attr,
        NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int __iio_trigger_register(struct iio_trigger *trig_info,
                           struct module *this_mod)
{
        int ret;

        trig_info->owner = this_mod;

        trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
        if (trig_info->id < 0)
                return trig_info->id;

        /* Set the name used for the sysfs directory etc */
        dev_set_name(&trig_info->dev, "trigger%ld",
                     (unsigned long) trig_info->id);

        ret = device_add(&trig_info->dev);
        if (ret)
                goto error_unregister_id;

        /* Add to list of available triggers held by the IIO core */
        mutex_lock(&iio_trigger_list_lock);
        if (__iio_trigger_find_by_name(trig_info->name)) {
                pr_err("Duplicate trigger name '%s'\n", trig_info->name);
                ret = -EEXIST;
                goto error_device_del;
        }
        list_add_tail(&trig_info->list, &iio_trigger_list);
        mutex_unlock(&iio_trigger_list_lock);

        return 0;

error_device_del:
        mutex_unlock(&iio_trigger_list_lock);
        device_del(&trig_info->dev);
error_unregister_id:
        ida_simple_remove(&iio_trigger_ida, trig_info->id);
        return ret;
}
EXPORT_SYMBOL(__iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
        mutex_lock(&iio_trigger_list_lock);
        list_del(&trig_info->list);
        mutex_unlock(&iio_trigger_list_lock);

        ida_simple_remove(&iio_trigger_ida, trig_info->id);
        /* Possible issue in here */
        device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
        if (!indio_dev || !trig)
                return -EINVAL;

        mutex_lock(&indio_dev->mlock);
        WARN_ON(indio_dev->trig_readonly);

        indio_dev->trig = iio_trigger_get(trig);
        indio_dev->trig_readonly = true;
        mutex_unlock(&indio_dev->mlock);

        return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
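
/*
 * Illustrative sketch (not part of this file): a driver whose device must
 * always use its own hardware trigger can register that trigger and then pin
 * it, so that later writes to current_trigger are rejected with -EPERM.  The
 * trig variable is assumed to have been allocated and set up already.
 *
 *      ret = devm_iio_trigger_register(dev, trig);
 *      if (ret)
 *              return ret;
 *
 *      ret = iio_trigger_set_immutable(indio_dev, trig);
 *      if (ret)
 *              return ret;
 */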

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
        struct iio_trigger *iter;

        list_for_each_entry(iter, &iio_trigger_list, list)
                if (!strcmp(iter->name, name))
                        return iter;

        return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
        struct iio_trigger *trig = NULL, *iter;

        mutex_lock(&iio_trigger_list_lock);
        list_for_each_entry(iter, &iio_trigger_list, list)
                if (sysfs_streq(iter->name, name)) {
                        trig = iter;
                        iio_trigger_get(trig);
                        break;
                }
        mutex_unlock(&iio_trigger_list_lock);

        return trig;
}

void iio_trigger_poll(struct iio_trigger *trig)
{
        int i;

        if (!atomic_read(&trig->use_count)) {
                atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        if (trig->subirqs[i].enabled)
                                generic_handle_irq(trig->subirq_base + i);
                        else
                                iio_trigger_notify_done(trig);
                }
        }
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
        iio_trigger_poll(private);
        return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
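
/*
 * Illustrative sketch (not part of this file): a driver whose hardware raises
 * a data-ready interrupt can wire that interrupt line directly to the helper
 * above; the trigger is passed as the dev_id cookie so it arrives here as
 * 'private'.  The irq number and flags below are assumptions for illustration.
 *
 *      ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *                             IRQF_TRIGGER_RISING, trig->name, trig);
 *      if (ret)
 *              return ret;
 */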

void iio_trigger_poll_chained(struct iio_trigger *trig)
{
        int i;

        if (!atomic_read(&trig->use_count)) {
                atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        if (trig->subirqs[i].enabled)
                                handle_nested_irq(trig->subirq_base + i);
                        else
                                iio_trigger_notify_done(trig);
                }
        }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
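
/*
 * Illustrative sketch (not part of this file): because this variant uses
 * handle_nested_irq(), it must run in thread context, typically from the
 * threaded half of the device's interrupt handler.  The foo_* names are
 * assumptions for illustration only.
 *
 *      static irqreturn_t foo_irq_thread(int irq, void *private)
 *      {
 *              struct iio_trigger *trig = private;
 *
 *              // acknowledge the interrupt in the device, then run consumers
 *              iio_trigger_poll_chained(trig);
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread,
 *                                      IRQF_ONESHOT, trig->name, trig);
 */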

void iio_trigger_notify_done(struct iio_trigger *trig)
{
        if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
            trig->ops->try_reenable)
                if (trig->ops->try_reenable(trig))
                        /* Missed an interrupt so launch new poll now */
                        iio_trigger_poll(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
        int ret;
        mutex_lock(&trig->pool_lock);
        ret = bitmap_find_free_region(trig->pool,
                                      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
                                      ilog2(1));
        mutex_unlock(&trig->pool_lock);
        if (ret >= 0)
                ret += trig->subirq_base;

        return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
        mutex_lock(&trig->pool_lock);
        clear_bit(irq - trig->subirq_base, trig->pool);
        mutex_unlock(&trig->pool_lock);
}

/* There is complexity in here.  With certain triggers (data ready) an
 * acknowledgement may be needed if the attached poll functions do not include
 * a data read for the triggering device.
 * This is not currently handled.  An alternative, which may be the best
 * option, is to refuse to enable the trigger unless the relevant poll
 * function is attached.
 */
/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
                                        struct iio_poll_func *pf)
{
        int ret = 0;
        bool notinuse
                = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

        /* Prevent the module from being removed whilst attached to a trigger */
        __module_get(pf->indio_dev->driver_module);

        /* Get irq number */
        pf->irq = iio_trigger_get_irq(trig);
        if (pf->irq < 0) {
                ret = pf->irq;
                goto out_put_module;
        }

        /* Request irq */
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
        if (ret < 0)
                goto out_put_irq;

        /* Enable trigger in driver */
        if (trig->ops && trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
                        goto out_free_irq;
        }

        /*
         * Check if we just registered to our own trigger: we determine that
         * this is the case if the IIO device and the trigger device share the
         * same parent device.
         */
        if (pf->indio_dev->dev.parent == trig->dev.parent)
                trig->attached_own_device = true;

        return ret;

out_free_irq:
        free_irq(pf->irq, pf);
out_put_irq:
        iio_trigger_put_irq(trig, pf->irq);
out_put_module:
        module_put(pf->indio_dev->driver_module);
        return ret;
}

static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
                                         struct iio_poll_func *pf)
{
        int ret = 0;
        bool no_other_users
                = (bitmap_weight(trig->pool,
                                 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
                   == 1);
        if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
                ret = trig->ops->set_trigger_state(trig, false);
                if (ret)
                        return ret;
        }
        if (pf->indio_dev->dev.parent == trig->dev.parent)
                trig->attached_own_device = false;
        iio_trigger_put_irq(trig, pf->irq);
        free_irq(pf->irq, pf);
        module_put(pf->indio_dev->driver_module);

        return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        pf->timestamp = iio_get_time_ns(pf->indio_dev);
        return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
                    irqreturn_t (*thread)(int irq, void *p),
                    int type,
                    struct iio_dev *indio_dev,
                    const char *fmt,
                    ...)
{
        va_list vargs;
        struct iio_poll_func *pf;

        pf = kmalloc(sizeof *pf, GFP_KERNEL);
        if (pf == NULL)
                return NULL;
        va_start(vargs, fmt);
        pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
        va_end(vargs);
        if (pf->name == NULL) {
                kfree(pf);
                return NULL;
        }
        pf->h = h;
        pf->thread = thread;
        pf->type = type;
        pf->indio_dev = indio_dev;

        return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
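
/*
 * Illustrative sketch (not part of this file): a triggered-buffer consumer
 * typically allocates one poll function pairing iio_pollfunc_store_time() as
 * the hard-irq half with its own threaded handler.  foo_trigger_handler is an
 * assumed driver function, shown for illustration only.
 *
 *      indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *                                               foo_trigger_handler,
 *                                               IRQF_ONESHOT,
 *                                               indio_dev,
 *                                               "%s_consumer%d",
 *                                               indio_dev->name,
 *                                               indio_dev->id);
 *      if (indio_dev->pollfunc == NULL)
 *              return -ENOMEM;
 *
 * This is what iio_triggered_buffer_setup() does on behalf of most drivers.
 */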

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
        kfree(pf->name);
        kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 * @dev:        device associated with an industrial I/O device
 * @attr:       pointer to the device_attribute structure that
 *              is being processed
 * @buf:        buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *         on success or 0 if no trigger is available
 */
static ssize_t iio_trigger_read_current(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        if (indio_dev->trig)
                return sprintf(buf, "%s\n", indio_dev->trig->name);
        return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 * @dev:        device associated with an industrial I/O device
 * @attr:       device attribute that is being processed
 * @buf:        string buffer that holds the name of the trigger
 * @len:        length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *         on success
 */
static ssize_t iio_trigger_write_current(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_trigger *oldtrig = indio_dev->trig;
        struct iio_trigger *trig;
        int ret;

        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                mutex_unlock(&indio_dev->mlock);
                return -EBUSY;
        }
        if (indio_dev->trig_readonly) {
                mutex_unlock(&indio_dev->mlock);
                return -EPERM;
        }
        mutex_unlock(&indio_dev->mlock);

        trig = iio_trigger_acquire_by_name(buf);
        if (oldtrig == trig) {
                ret = len;
                goto out_trigger_put;
        }

        if (trig && indio_dev->info->validate_trigger) {
                ret = indio_dev->info->validate_trigger(indio_dev, trig);
                if (ret)
                        goto out_trigger_put;
        }

        if (trig && trig->ops && trig->ops->validate_device) {
                ret = trig->ops->validate_device(trig, indio_dev);
                if (ret)
                        goto out_trigger_put;
        }

        indio_dev->trig = trig;

        if (oldtrig) {
                if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
                        iio_trigger_detach_poll_func(oldtrig,
                                                     indio_dev->pollfunc_event);
                iio_trigger_put(oldtrig);
        }
        if (indio_dev->trig) {
                if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
                        iio_trigger_attach_poll_func(indio_dev->trig,
                                                     indio_dev->pollfunc_event);
        }

        return len;

out_trigger_put:
        if (trig)
                iio_trigger_put(trig);
        return ret;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
                   iio_trigger_read_current,
                   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
        &dev_attr_current_trigger.attr,
        NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
        .name = "trigger",
        .attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
        struct iio_trigger *trig = to_iio_trigger(device);
        int i;

        if (trig->subirq_base) {
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        irq_modify_status(trig->subirq_base + i,
                                          IRQ_NOAUTOEN,
                                          IRQ_NOREQUEST | IRQ_NOPROBE);
                        irq_set_chip(trig->subirq_base + i,
                                     NULL);
                        irq_set_handler(trig->subirq_base + i,
                                        NULL);
                }

                irq_free_descs(trig->subirq_base,
                               CONFIG_IIO_CONSUMERS_PER_TRIGGER);
        }
        kfree(trig->name);
        kfree(trig);
}

static const struct device_type iio_trig_type = {
        .release = iio_trig_release,
        .groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct iio_trigger *trig
                = container_of(chip,
                               struct iio_trigger, subirq_chip);
        trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct iio_trigger *trig
                = container_of(chip,
                               struct iio_trigger, subirq_chip);
        trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
        struct iio_trigger *trig;
        int i;

        trig = kzalloc(sizeof *trig, GFP_KERNEL);
        if (!trig)
                return NULL;

        trig->dev.type = &iio_trig_type;
        trig->dev.bus = &iio_bus_type;
        device_initialize(&trig->dev);

        mutex_init(&trig->pool_lock);
        trig->subirq_base = irq_alloc_descs(-1, 0,
                                            CONFIG_IIO_CONSUMERS_PER_TRIGGER,
                                            0);
        if (trig->subirq_base < 0)
                goto free_trig;

        trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
        if (trig->name == NULL)
                goto free_descs;

        trig->subirq_chip.name = trig->name;
        trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
        trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
        for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
                irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
                irq_modify_status(trig->subirq_base + i,
                                  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
        }
        get_device(&trig->dev);

        return trig;

free_descs:
        irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
        kfree(trig);
        return NULL;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
        struct iio_trigger *trig;
        va_list vargs;

        va_start(vargs, fmt);
        trig = viio_trigger_alloc(fmt, vargs);
        va_end(vargs);

        return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
        if (trig)
                put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
        iio_trigger_free(*(struct iio_trigger **)res);
}

static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
{
        struct iio_trigger **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }

        return *r == data;
}

/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:                Device to allocate iio_trigger for
 * @fmt:                trigger name format. If it includes format
 *                      specifiers, the additional arguments following
 *                      format are formatted and inserted in the resulting
 *                      string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc.  iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed separately,
 * devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
                                                const char *fmt, ...)
{
        struct iio_trigger **ptr, *trig;
        va_list vargs;

        ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return NULL;

        /* use raw alloc_dr for kmalloc caller tracing */
        va_start(vargs, fmt);
        trig = viio_trigger_alloc(fmt, vargs);
        va_end(vargs);
        if (trig) {
                *ptr = trig;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
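
/*
 * Illustrative sketch (not part of this file): typical setup of a freshly
 * allocated trigger before registration.  foo_trigger_ops and st are
 * assumptions made purely for illustration.
 *
 *      trig = devm_iio_trigger_alloc(dev, "%s-trigger", indio_dev->name);
 *      if (!trig)
 *              return -ENOMEM;
 *
 *      trig->dev.parent = dev;
 *      trig->ops = &foo_trigger_ops;
 *      iio_trigger_set_drvdata(trig, st);
 */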

/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:                Device this iio_trigger belongs to
 * @iio_trig:           the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
        int rc;

        rc = devres_release(dev, devm_iio_trigger_release,
                            devm_iio_trigger_match, iio_trig);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);

static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
        iio_trigger_unregister(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:        device this trigger was allocated for
 * @trig_info:  trigger to register
 * @this_mod:   module registering the trigger
 *
 * Managed iio_trigger_register().  The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_trigger registered with this function needs to be unregistered
 * separately, devm_iio_trigger_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int __devm_iio_trigger_register(struct device *dev,
                                struct iio_trigger *trig_info,
                                struct module *this_mod)
{
        struct iio_trigger **ptr;
        int ret;

        ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        *ptr = trig_info;
        ret = __iio_trigger_register(trig_info, this_mod);
        if (!ret)
                devres_add(dev, ptr);
        else
                devres_free(ptr);

        return ret;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
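
/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * function through the devm_iio_trigger_register() wrapper macro, which
 * supplies THIS_MODULE and ties the trigger's lifetime to the driver.
 *
 *      ret = devm_iio_trigger_register(dev, trig);
 *      if (ret) {
 *              dev_err(dev, "failed to register trigger\n");
 *              return ret;
 *      }
 *
 * No explicit unregister is needed on the error or remove paths; devres
 * unregisters the trigger on driver detach.
 */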

/**
 * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
 * @dev:        device this iio_trigger belongs to
 * @trig_info:  the trigger associated with the device
 *
 * Unregister trigger registered with devm_iio_trigger_register().
 */
void devm_iio_trigger_unregister(struct device *dev,
                                 struct iio_trigger *trig_info)
{
        int rc;

        rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
                            trig_info);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);

bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
        return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
 *  the same device
 * @trig: The IIO trigger to check
 * @indio_dev: the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
        struct iio_dev *indio_dev)
{
        if (indio_dev->dev.parent != trig->dev.parent)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
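
/*
 * Illustrative sketch (not part of this file): a driver whose hardware trigger
 * may only be used with the device that provides it can plug this helper
 * straight into its trigger ops.  foo_* names are assumptions for
 * illustration only.
 *
 *      static const struct iio_trigger_ops foo_trigger_ops = {
 *              .set_trigger_state = foo_set_trigger_state,
 *              .validate_device = iio_trigger_validate_own_device,
 *      };
 */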

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
        indio_dev->groups[indio_dev->groupcounter++] =
                &iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
        /* Clean up an associated but not attached trigger reference */
        if (indio_dev->trig)
                iio_trigger_put(indio_dev->trig);
}

int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
        return iio_trigger_attach_poll_func(indio_dev->trig,
                                            indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
        return iio_trigger_detach_poll_func(indio_dev->trig,
                                             indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
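
/*
 * Illustrative sketch (not part of this file): drivers that manage their own
 * buffer hook these two helpers up via iio_buffer_setup_ops, so the poll
 * function is attached when the buffer is enabled and detached when it is
 * disabled.  foo_buffer_setup_ops is an assumed name for illustration.
 *
 *      static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *              .postenable = iio_triggered_buffer_postenable,
 *              .predisable = iio_triggered_buffer_predisable,
 *      };
 *
 *      indio_dev->setup_ops = &foo_buffer_setup_ops;
 */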