linux/drivers/iio/industrialio-buffer.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
        return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
        return buf->access->data_available(buf);
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for the iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
        int ret;

        if (!indio_dev->info)
                return -ENODEV;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;

        do {
                if (!iio_buffer_data_available(rb)) {
                        if (filp->f_flags & O_NONBLOCK)
                                return -EAGAIN;

                        ret = wait_event_interruptible(rb->pollq,
                                        iio_buffer_data_available(rb) ||
                                        indio_dev->info == NULL);
                        if (ret)
                                return ret;
                        if (indio_dev->info == NULL)
                                return -ENODEV;
                }

                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
        } while (ret == 0);

        return ret;
}
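
/*
 * Illustrative sketch (not part of this file): how a userspace consumer
 * would typically drive the chrdev read path above. The device path,
 * descriptor name and scan size are hypothetical; includes and error
 * handling are omitted.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[16];
 *	ssize_t n;
 *
 *	// Blocks until data arrives unless O_NONBLOCK was set, mirroring
 *	// the wait_event_interruptible() call above.
 *	n = read(fd, scan, sizeof(scan));
 *	if (n < 0 && errno == EAGAIN)
 *		; // non-blocking descriptor, nothing buffered yet
 */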

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for the char device
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading, 0 if
 *	   not, or -ENODEV if the device has been unregistered.
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!indio_dev->info)
                return -ENODEV;

        poll_wait(filp, &rb->pollq, wait);
        if (iio_buffer_data_available(rb))
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}
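
/*
 * Illustrative sketch (not part of this file): pairing the chrdev with
 * poll() from userspace. The descriptor is assumed to be set up as in
 * the read example above.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, scan, sizeof(scan)); // data is now buffered
 */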

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        INIT_LIST_HEAD(&buffer->buffer_list);
        init_waitqueue_head(&buffer->pollq);
        kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
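
/*
 * A minimal sketch of what a buffer implementation is expected to look
 * like, given that iio_buffer_read_first_n_outer() above relies on the
 * struct iio_buffer being the first element. The type and allocator
 * names are hypothetical, not an in-tree implementation.
 *
 *	struct my_fifo_buffer {
 *		struct iio_buffer buffer;   // must stay the first member
 *		unsigned int watermark;     // implementation state
 *	};
 *
 *	static struct iio_buffer *my_fifo_allocate(void)
 *	{
 *		struct my_fifo_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *
 *		if (!b)
 *			return NULL;
 *		iio_buffer_init(&b->buffer); // lists, waitqueue, kref
 *		return &b->buffer;
 *	}
 */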

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        if (this_attr->c->scan_type.repeat > 1)
                return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.repeat,
                       this_attr->c->scan_type.shift);
        else
                return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
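
/*
 * The string built above is what userspace sees in a channel's
 * scan_elements/<chan>_type attribute. For example (values illustrative),
 * a little-endian signed channel with 12 valid bits stored in 16 bits and
 * shifted right by 4 reads as:
 *
 *	le:s12/16>>4
 *
 * and with a repeat count of 2 the storage part gains an X2:
 *
 *	le:s12/16X2>>4
 */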

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        /* Ensure ret is 0 or 1. */
        ret = !!test_bit(to_iio_dev_attr(attr)->address,
                       indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret;
        bool state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}
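
/*
 * From userspace the store callback above is reached through sysfs,
 * e.g. (paths illustrative, for an IIO device numbered 0):
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *
 * Writing while the buffer is enabled fails with -EBUSY, matching the
 * iio_buffer_is_active() check above.
 */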

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool state;

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     IIO_SEPARATE,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        ret = attrcount;
        return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                /* new magic */
                for (i = 0; i < num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                         &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                indio_dev->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                                    sizeof(*buffer->scan_mask),
                                                    GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
                                              sizeof(buffer->scan_el_group.attrs[0]),
                                              GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
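
/*
 * A hedged sketch of the call site in a driver's probe path; the channel
 * array name is hypothetical, the field values illustrative.
 *
 *	static const struct iio_chan_spec my_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 's',
 *				.realbits = 12,
 *				.storagebits = 16,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 */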

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
        } else {
                if (buffer->access->set_length)
                        buffer->access->set_length(buffer, val);
                ret = 0;
        }
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
                                          unsigned int masklength,
                                          const unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
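
/*
 * available_scan_masks is laid out as consecutive bitmaps of
 * BITS_TO_LONGS(masklength) longs each, terminated by an empty mask,
 * which is why the loop above can step through it with while (*av_masks).
 * A sketch for a device whose hardware can scan either channel 0 alone or
 * channels 0-2 together (array name hypothetical, masklength assumed to
 * fit in one long):
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0),
 *		BIT(0) | BIT(1) | BIT(2),
 *		0,	// terminator
 *	};
 *	indio_dev->available_scan_masks = my_scan_masks;
 */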

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                const unsigned long *mask, bool timestamp)
{
        const struct iio_chan_spec *ch;
        unsigned bytes = 0;
        int length, i;

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
                         indio_dev->masklength) {
                ch = iio_find_channel_from_si(indio_dev, i);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        if (timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                                              indio_dev->scan_index_timestamp);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        return bytes;
}
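
/*
 * Worked example of the alignment rules above (numbers illustrative):
 * one 16-bit channel (storagebits = 16, so length = 2) plus the 64-bit
 * timestamp lays out as
 *
 *	bytes = 0 -> ALIGN(0, 2) = 0, + 2 => 2	(channel)
 *	bytes = 2 -> ALIGN(2, 8) = 8, + 8 => 16	(timestamp)
 *
 * so scan_bytes is 16: six padding bytes are inserted so the timestamp
 * lands on its natural 8-byte alignment.
 */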

static void iio_buffer_activate(struct iio_dev *indio_dev,
        struct iio_buffer *buffer)
{
        iio_buffer_get(buffer);
        list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
        list_del_init(&buffer->buffer_list);
        iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer, *_buffer;

        if (list_empty(&indio_dev->buffer_list))
                return;

        if (indio_dev->setup_ops->predisable)
                indio_dev->setup_ops->predisable(indio_dev);

        list_for_each_entry_safe(buffer, _buffer,
                        &indio_dev->buffer_list, buffer_list)
                iio_buffer_deactivate(buffer);

        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);

        if (indio_dev->available_scan_masks == NULL)
                kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
        struct iio_buffer *buffer)
{
        unsigned int bytes;

        if (!buffer->access->set_bytes_per_datum)
                return;

        bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
                buffer->scan_timestamp);

        buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        int ret;
        int success = 0;
        struct iio_buffer *buffer;
        unsigned long *compound_mask;
        const unsigned long *old_mask;

        /* Wind down existing buffers - iff there are any */
        if (!list_empty(&indio_dev->buffer_list)) {
                if (indio_dev->setup_ops->predisable) {
                        ret = indio_dev->setup_ops->predisable(indio_dev);
                        if (ret)
                                return ret;
                }
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->setup_ops->postdisable) {
                        ret = indio_dev->setup_ops->postdisable(indio_dev);
                        if (ret)
                                return ret;
                }
        }
        /* Keep a copy of current setup to allow roll back */
        old_mask = indio_dev->active_scan_mask;
        if (!indio_dev->available_scan_masks)
                indio_dev->active_scan_mask = NULL;

        if (remove_buffer)
                iio_buffer_deactivate(remove_buffer);
        if (insert_buffer)
                iio_buffer_activate(indio_dev, insert_buffer);

        /* If no buffers in list, we are done */
        if (list_empty(&indio_dev->buffer_list)) {
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->available_scan_masks == NULL)
                        kfree(old_mask);
                return 0;
        }

        /* What scan mask do we actually have? */
        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                sizeof(long), GFP_KERNEL);
        if (compound_mask == NULL) {
                if (indio_dev->available_scan_masks == NULL)
                        kfree(old_mask);
                return -ENOMEM;
        }
        indio_dev->scan_timestamp = 0;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
                          indio_dev->masklength);
                indio_dev->scan_timestamp |= buffer->scan_timestamp;
        }
        if (indio_dev->available_scan_masks) {
                indio_dev->active_scan_mask =
                        iio_scan_mask_match(indio_dev->available_scan_masks,
                                            indio_dev->masklength,
                                            compound_mask);
                if (indio_dev->active_scan_mask == NULL) {
                        /*
                         * Roll back.
                         * Note can only occur when adding a buffer.
                         */
                        iio_buffer_deactivate(insert_buffer);
                        if (old_mask) {
                                indio_dev->active_scan_mask = old_mask;
                                success = -EINVAL;
                        } else {
                                kfree(compound_mask);
                                ret = -EINVAL;
                                return ret;
                        }
                }
        } else {
                indio_dev->active_scan_mask = compound_mask;
        }

        iio_update_demux(indio_dev);

        /* Wind up again */
        if (indio_dev->setup_ops->preenable) {
                ret = indio_dev->setup_ops->preenable(indio_dev);
                if (ret) {
                        printk(KERN_ERR
                               "Buffer not started: buffer preenable failed (%d)\n", ret);
                        goto error_remove_inserted;
                }
        }
        indio_dev->scan_bytes =
                iio_compute_scan_bytes(indio_dev,
                                       indio_dev->active_scan_mask,
                                       indio_dev->scan_timestamp);
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                iio_buffer_update_bytes_per_datum(indio_dev, buffer);
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed (%d)\n", ret);
                                goto error_run_postdisable;
                        }
                }
        }
        if (indio_dev->info->update_scan_mode) {
                ret = indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
                if (ret < 0) {
                        printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
                        goto error_run_postdisable;
                }
        }
        /* Definitely possible for devices to support both of these. */
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
                if (!indio_dev->trig) {
                        printk(KERN_INFO "Buffer not started: no trigger\n");
                        ret = -EINVAL;
                        /* Can only occur on first buffer */
                        goto error_run_postdisable;
                }
                indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
        } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
        } else { /* Should never be reached */
                ret = -EINVAL;
                goto error_run_postdisable;
        }

        if (indio_dev->setup_ops->postenable) {
                ret = indio_dev->setup_ops->postenable(indio_dev);
                if (ret) {
                        printk(KERN_INFO
                               "Buffer not started: postenable failed (%d)\n", ret);
                        indio_dev->currentmode = INDIO_DIRECT_MODE;
                        if (indio_dev->setup_ops->postdisable)
                                indio_dev->setup_ops->postdisable(indio_dev);
                        goto error_disable_all_buffers;
                }
        }

        if (indio_dev->available_scan_masks)
                kfree(compound_mask);
        else
                kfree(old_mask);

        return success;

error_disable_all_buffers:
        indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
        if (insert_buffer)
                iio_buffer_deactivate(insert_buffer);
        indio_dev->active_scan_mask = old_mask;
        kfree(compound_mask);
        return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        int ret;

        if (insert_buffer == remove_buffer)
                return 0;

        mutex_lock(&indio_dev->info_exist_lock);
        mutex_lock(&indio_dev->mlock);

        if (insert_buffer && iio_buffer_is_active(insert_buffer))
                insert_buffer = NULL;

        if (remove_buffer && !iio_buffer_is_active(remove_buffer))
                remove_buffer = NULL;

        if (!insert_buffer && !remove_buffer) {
                ret = 0;
                goto out_unlock;
        }

        if (indio_dev->info == NULL) {
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
        mutex_unlock(&indio_dev->mlock);
        mutex_unlock(&indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
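
/*
 * A hedged sketch of how a driver with a hardware buffer might attach it,
 * e.g. at probe time; 'hwbuf' is a hypothetical buffer instance.
 *
 *	ret = iio_update_buffers(indio_dev, hwbuf, NULL);
 *	if (ret)
 *		return ret; // e.g. -EBUSY, -EINVAL or -ENODEV as above
 *
 * Tearing down is the mirror image:
 *
 *	iio_update_buffers(indio_dev, NULL, hwbuf);
 */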

ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool inlist;

        ret = strtobool(buf, &requested_state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);

        /* Find out if it is in the list */
        inlist = iio_buffer_is_active(indio_dev->buffer);
        /* Already in desired state */
        if (inlist == requested_state)
                goto done;

        if (requested_state)
                ret = __iio_update_buffers(indio_dev,
                                         indio_dev->buffer, NULL);
        else
                ret = __iio_update_buffers(indio_dev,
                                         NULL, indio_dev->buffer);

done:
        mutex_unlock(&indio_dev->mlock);
        return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
        const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
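
/*
 * Typical hookup for a device that muxes a single channel into its
 * converter; the ops structure name is hypothetical.
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */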

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
        const unsigned long *mask)
{
        if (!indio_dev->setup_ops->validate_scan_mask)
                return true;

        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
                      struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);

        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "Trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (!iio_validate_scan_mask(indio_dev, trialmask))
                goto err_invalid_mask;

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit >= indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        /* Ensure return value is 0 or 1. */
        return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
                                 const void *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
        const void *dataout = iio_demux(buffer, data);

        return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
        struct iio_demux_table *p, *q;

        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
        int ret;
        struct iio_buffer *buf;

        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
                ret = iio_push_to_buffer(buf, data);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
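
/*
 * A minimal sketch of the producer side, e.g. a pollfunc bottom half.
 * Names other than iio_push_to_buffers() are hypothetical, and the scan
 * array must be laid out and aligned per the active scan mask.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u64 scan[2]; // room/alignment for one scan, illustrative
 *
 *		my_device_read_scan(indio_dev, scan); // hypothetical helper
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */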

static int iio_buffer_add_demux(struct iio_buffer *buffer,
        struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
        unsigned int length)
{
        if (*p && (*p)->from + (*p)->length == in_loc &&
                (*p)->to + (*p)->length == out_loc) {
                (*p)->length += length;
        } else {
                *p = kmalloc(sizeof(**p), GFP_KERNEL);
                if (*p == NULL)
                        return -ENOMEM;
                (*p)->from = in_loc;
                (*p)->to = out_loc;
                (*p)->length = length;
                list_add_tail(&(*p)->l, &buffer->demux_list);
        }

        return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
                                   struct iio_buffer *buffer)
{
        const struct iio_chan_spec *ch;
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p = NULL;

        /* Clear out any old demux */
        iio_buffer_demux_free(buffer);
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        ch = iio_find_channel_from_si(indio_dev, in_ind);
                        if (ch->scan_type.repeat > 1)
                                length = ch->scan_type.storagebits / 8 *
                                        ch->scan_type.repeat;
                        else
                                length = ch->scan_type.storagebits / 8;
                        /* Make sure we are aligned */
                        in_loc = roundup(in_loc, length) + length;
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                }
                ch = iio_find_channel_from_si(indio_dev, in_ind);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                        indio_dev->scan_index_timestamp);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        iio_buffer_demux_free(buffer);

        return ret;
}
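
/*
 * Worked example (values illustrative): active_scan_mask covers channels
 * {0, 1, 2} of 16 bits each, while this buffer's scan_mask is {0, 2}.
 * Channel 1 is present in the incoming scan but unwanted, so the table
 * built above collapses to two entries, the second skipping the hole:
 *
 *	{ .from = 0, .to = 0, .length = 2 }	// channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	// channel 2
 *
 * Adjacent copies are merged by iio_buffer_add_demux(), and a buffer
 * whose mask equals the active mask needs no table at all (the
 * bitmap_equal() early return above).
 */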

int iio_update_demux(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        int ret;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret = iio_buffer_update_demux(indio_dev, buffer);
                if (ret < 0)
                        goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                iio_buffer_demux_free(buffer);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
        struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

        buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
        if (buffer)
                kref_get(&buffer->ref);

        return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
        if (buffer)
                kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
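
/*
 * Reference counting sketch: every user of a buffer beyond its creator
 * takes its own reference, mirroring iio_buffer_activate() and
 * iio_buffer_deactivate() above. 'other' is a hypothetical second user.
 *
 *	struct iio_buffer *b = iio_buffer_get(other->buffer);
 *
 *	// ... use b ...
 *	iio_buffer_put(b); // may invoke buffer->access->release()
 */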