linux/drivers/iio/industrialio-buffer.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure pointer for the buffer chrdev
 * @buf:	destination buffer in userspace
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused by this implementation)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
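
/*
 * Illustrative sketch (not part of this file): a buffer implementation
 * typically embeds struct iio_buffer as its first member, so the core can
 * pass the iio_buffer pointer around while the implementation recovers its
 * private state from the same address. Hypothetical example layout:
 *
 *	struct my_fifo_buffer {
 *		struct iio_buffer buffer;	// must be the first member
 *		struct kfifo kf;		// implementation private
 *	};
 */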

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	file structure pointer for the buffer chrdev
 * @wait:	poll table structure pointer on which to register the wait queue
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb)
		return 0;
	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
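
/*
 * For example, a signed 12-bit sample stored little-endian in 16 bits and
 * right-shifted by 4 reads back from the sysfs "type" attribute as:
 *
 *	le:s12/16>>4
 */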

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build scan element attributes from the channel specs */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs,
		       buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
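
/*
 * A minimal sketch of typical use from a driver probe path, assuming a
 * kfifo-backed buffer and a my_channels[] array (hypothetical names, error
 * handling trimmed):
 *
 *	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_kfifo;
 */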

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator here, since a NULL mask makes
 * no sense as a valid match.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
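
/*
 * available_scan_masks is a zero-terminated array of bitmaps, each
 * BITS_TO_LONGS(masklength) longs wide. A minimal sketch (assumed values,
 * masklength <= BITS_PER_LONG so each entry is a single long) for hardware
 * that can capture either channel 0 alone or channels 0-2 together:
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0),
 *		BIT(0) | BIT(1) | BIT(2),
 *		0,				// terminator
 *	};
 *	indio_dev->available_scan_masks = my_scan_masks;
 */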

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
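
/*
 * Worked example (assumed channel layout): a 16-bit sample, a 32-bit sample
 * and a 64-bit timestamp pack into the scan as
 *
 *	bytes = 0 -> ALIGN(0, 2) + 2 = 2	(16-bit sample)
 *	bytes = 2 -> ALIGN(2, 4) + 4 = 8	(32-bit sample, 2 pad bytes)
 *	bytes = 8 -> ALIGN(8, 8) + 8 = 16	(timestamp)
 *
 * so each scan occupies 16 bytes, padding included.
 */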

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		list_del_init(&buffer->buffer_list);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del(&insert_buffer->buffer_list);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
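
/*
 * Sketch of how a caller enables or disables a buffer through this helper
 * (this mirrors iio_buffer_store_enable() below, which holds mlock while
 * calling it): pass the buffer in the insert slot to enable, in the remove
 * slot to disable:
 *
 *	ret = iio_update_buffers(indio_dev, indio_dev->buffer, NULL);
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, indio_dev->buffer);
 */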

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
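
/*
 * A minimal sketch of wiring this validator into a driver's buffer setup
 * ops (assumed driver names):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = iio_sw_buffer_preenable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *	indio_dev->setup_ops = &my_setup_ops;
 */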

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
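
/*
 * Illustrative example (assumed masks): if the device captures channels 0,
 * 1 and 2 (all 16-bit) but this buffer only wants channels 0 and 2, the
 * demux list built in iio_buffer_update_demux() below holds two entries:
 *
 *	{ .from = 0, .to = 0, .length = 2 }	channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	channel 2, skipping channel 1
 */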

static unsigned char *iio_demux(struct iio_buffer *buffer,
				 unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
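
/*
 * Sketch of the usual producer side (assumed driver names and scan size):
 * a trigger handler fills one scan's worth of data and hands it to every
 * attached buffer:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 data[MY_SCAN_BYTES];		// hypothetical size
 *
 *		my_read_scan(indio_dev, data);	// hypothetical helper
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */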

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);