// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "comedidev.h"
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION          PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION          PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
        struct comedi_buf_map *bm =
                container_of(kref, struct comedi_buf_map, refcount);
        struct comedi_buf_page *buf;
        unsigned int i;

        if (bm->page_list) {
                if (bm->dma_dir != DMA_NONE) {
                        /*
                         * DMA buffer was allocated as a single block.
                         * Address is in page_list[0].
                         */
                        buf = &bm->page_list[0];
                        dma_free_coherent(bm->dma_hw_dev,
                                          PAGE_SIZE * bm->n_pages,
                                          buf->virt_addr, buf->dma_addr);
                } else {
                        for (i = 0; i < bm->n_pages; i++) {
                                buf = &bm->page_list[i];
                                ClearPageReserved(virt_to_page(buf->virt_addr));
                                free_page((unsigned long)buf->virt_addr);
                        }
                }
                vfree(bm->page_list);
        }
        if (bm->dma_dir != DMA_NONE)
                put_device(bm->dma_hw_dev);
        kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
                              struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_buf_map *bm;
        unsigned long flags;

        if (async->prealloc_buf) {
                if (s->async_dma_dir == DMA_NONE)
                        vunmap(async->prealloc_buf);
                async->prealloc_buf = NULL;
                async->prealloc_bufsz = 0;
        }

        spin_lock_irqsave(&s->spin_lock, flags);
        bm = async->buf_map;
        async->buf_map = NULL;
        spin_unlock_irqrestore(&s->spin_lock, flags);
        comedi_buf_map_put(bm);
}

static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
                     unsigned int n_pages)
{
        struct comedi_buf_map *bm;
        struct comedi_buf_page *buf;
        unsigned int i;

        bm = kzalloc(sizeof(*bm), GFP_KERNEL);
        if (!bm)
                return NULL;

        kref_init(&bm->refcount);
        bm->dma_dir = dma_dir;
        if (bm->dma_dir != DMA_NONE) {
                /* Need ref to hardware device to free buffer later. */
                bm->dma_hw_dev = get_device(dev->hw_dev);
        }

        bm->page_list = vzalloc(sizeof(*buf) * n_pages);
        if (!bm->page_list)
                goto err;

        if (bm->dma_dir != DMA_NONE) {
                void *virt_addr;
                dma_addr_t dma_addr;

                /*
                 * Currently, the DMA buffer needs to be allocated as a
                 * single block so that it can be mmap()'ed.
                 */
                virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
                                               PAGE_SIZE * n_pages, &dma_addr,
                                               GFP_KERNEL);
                if (!virt_addr)
                        goto err;

                for (i = 0; i < n_pages; i++) {
                        buf = &bm->page_list[i];
                        buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
                        buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
                }

                bm->n_pages = i;
        } else {
                for (i = 0; i < n_pages; i++) {
                        buf = &bm->page_list[i];
                        buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
                        if (!buf->virt_addr)
                                break;

                        SetPageReserved(virt_to_page(buf->virt_addr));
                }

                bm->n_pages = i;
                if (i < n_pages)
                        goto err;
        }

        return bm;

err:
        comedi_buf_map_put(bm);
        return NULL;
}

static void __comedi_buf_alloc(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               unsigned int n_pages)
{
        struct comedi_async *async = s->async;
        struct page **pages = NULL;
        struct comedi_buf_map *bm;
        struct comedi_buf_page *buf;
        unsigned long flags;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
                dev_err(dev->class_dev,
                        "dma buffer allocation not supported\n");
                return;
        }

        bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
        if (!bm)
                return;

        spin_lock_irqsave(&s->spin_lock, flags);
        async->buf_map = bm;
        spin_unlock_irqrestore(&s->spin_lock, flags);

        if (bm->dma_dir != DMA_NONE) {
                /*
                 * DMA buffer was allocated as a single block.
                 * Address is in page_list[0].
                 */
                buf = &bm->page_list[0];
                async->prealloc_buf = buf->virt_addr;
        } else {
                pages = vmalloc(sizeof(struct page *) * n_pages);
                if (!pages)
                        return;

                for (i = 0; i < n_pages; i++) {
                        buf = &bm->page_list[i];
                        pages[i] = virt_to_page(buf->virt_addr);
                }

                /* vmap the pages to prealloc_buf */
                async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
                                           COMEDI_PAGE_PROTECTION);

                vfree(pages);
        }
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
        if (bm)
                kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
        if (bm)
                return kref_put(&bm->refcount, comedi_buf_map_kref_release);
        return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
                          void *buf, int len, int write)
{
        unsigned int pgoff = offset_in_page(offset);
        unsigned long pg = offset >> PAGE_SHIFT;
        int done = 0;

        while (done < len && pg < bm->n_pages) {
                int l = min_t(int, len - done, PAGE_SIZE - pgoff);
                void *b = bm->page_list[pg].virt_addr + pgoff;

                if (write)
                        memcpy(b, buf, l);
                else
                        memcpy(buf, b, l);
                buf += l;
                done += l;
                pg++;
                pgoff = 0;
        }
        return done;
}
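
/*
 * Illustrative sketch, not part of this file: the comedi core uses the
 * helper above from the "access" method of the vm_operations_struct it
 * installs for mmap()'ed buffers, roughly as below.  The callback name and
 * the use of vm_private_data are assumptions made for the example.
 *
 *      static int comedi_vm_access(struct vm_area_struct *vma,
 *                                  unsigned long addr, void *buf,
 *                                  int len, int write)
 *      {
 *              struct comedi_buf_map *bm = vma->vm_private_data;
 *              unsigned long offset =
 *                      addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT);
 *
 *              if (len < 0)
 *                      return -EINVAL;
 *              if (len > vma->vm_end - addr)
 *                      len = vma->vm_end - addr;
 *              return comedi_buf_map_access(bm, offset, buf, len, write);
 *      }
 */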

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_buf_map *bm = NULL;
        unsigned long flags;

        if (!async)
                return NULL;

        spin_lock_irqsave(&s->spin_lock, flags);
        bm = async->buf_map;
        /* only want it if buffer pages allocated */
        if (bm && bm->n_pages)
                comedi_buf_map_get(bm);
        else
                bm = NULL;
        spin_unlock_irqrestore(&s->spin_lock, flags);

        return bm;
}

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
        struct comedi_buf_map *bm = s->async->buf_map;

        return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
                     unsigned long new_size)
{
        struct comedi_async *async = s->async;

        lockdep_assert_held(&dev->mutex);

        /* Round up new_size to multiple of PAGE_SIZE */
        new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

        /* if no change is required, do nothing */
        if (async->prealloc_buf && async->prealloc_bufsz == new_size)
                return 0;

        /* deallocate old buffer */
        __comedi_buf_free(dev, s);

        /* allocate new buffer */
        if (new_size) {
                unsigned int n_pages = new_size >> PAGE_SHIFT;

                __comedi_buf_alloc(dev, s, n_pages);

                if (!async->prealloc_buf) {
                        /* allocation failed */
                        __comedi_buf_free(dev, s);
                        return -ENOMEM;
                }
        }
        async->prealloc_bufsz = new_size;

        return 0;
}

void comedi_buf_reset(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;

        async->buf_write_alloc_count = 0;
        async->buf_write_count = 0;
        async->buf_read_alloc_count = 0;
        async->buf_read_count = 0;

        async->buf_write_ptr = 0;
        async->buf_read_ptr = 0;

        async->cur_chan = 0;
        async->scans_done = 0;
        async->scan_progress = 0;
        async->munge_chan = 0;
        async->munge_count = 0;
        async->munge_ptr = 0;

        async->events = 0;
}

static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

        return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

        return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice.  The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
                                    unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int unalloc = comedi_buf_write_n_unalloc(s);

        if (nbytes > unalloc)
                nbytes = unalloc;

        async->buf_write_alloc_count += nbytes;

        /*
         * ensure the async buffer 'counts' are read and updated
         * before we write data to the write-alloc'ed buffer space
         */
        smp_mb();

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
                                     unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int count = 0;
        const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

        if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
                async->munge_count += num_bytes;
                count = num_bytes;
        } else {
                /* don't munge partial samples */
                num_bytes -= num_bytes % num_sample_bytes;
                while (count < num_bytes) {
                        int block_size = num_bytes - count;
                        unsigned int buf_end;

                        buf_end = async->prealloc_bufsz - async->munge_ptr;
                        if (block_size > buf_end)
                                block_size = buf_end;

                        s->munge(s->device, s,
                                 async->prealloc_buf + async->munge_ptr,
                                 block_size, async->munge_chan);

                        /*
                         * ensure data is munged in buffer before the
                         * async buffer munge_count is incremented
                         */
                        smp_wmb();

                        async->munge_chan += block_size / num_sample_bytes;
                        async->munge_chan %= async->cmd.chanlist_len;
                        async->munge_count += block_size;
                        async->munge_ptr += block_size;
                        async->munge_ptr %= async->prealloc_bufsz;
                        count += block_size;
                }
        }

        return count;
}

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;

        return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice.  The amount of
 * space freed is limited to the amount that was reserved.  The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here.  The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
                                   unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int allocated = comedi_buf_write_n_allocated(s);

        if (nbytes > allocated)
                nbytes = allocated;

        async->buf_write_count += nbytes;
        async->buf_write_ptr += nbytes;
        comedi_buf_munge(s, async->buf_write_count - async->munge_count);
        if (async->buf_write_ptr >= async->prealloc_bufsz)
                async->buf_write_ptr %= async->prealloc_bufsz;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
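
/*
 * Illustrative sketch, assumed driver-side usage and not part of this file:
 * a driver that fills the preallocated buffer itself reserves space with
 * comedi_buf_write_alloc(), copies data in starting at buf_write_ptr, and
 * then releases it with comedi_buf_write_free() so that it is munged and
 * becomes readable.  The helper name below is hypothetical; the second
 * memcpy() handles a reservation that wraps at the end of the ring.
 *
 *      static void mydrv_push_block(struct comedi_subdevice *s,
 *                                   const void *data, unsigned int nbytes)
 *      {
 *              struct comedi_async *async = s->async;
 *              unsigned int n = comedi_buf_write_alloc(s, nbytes);
 *              unsigned int wp = async->buf_write_ptr;
 *              unsigned int chunk = min(n, async->prealloc_bufsz - wp);
 *
 *              memcpy(async->prealloc_buf + wp, data, chunk);
 *              if (n > chunk)
 *                      memcpy(async->prealloc_buf, data + chunk, n - chunk);
 *              comedi_buf_write_free(s, n);
 *      }
 */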

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice.  The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int num_bytes;

        if (!async)
                return 0;

        num_bytes = async->munge_count - async->buf_read_count;

        /*
         * ensure the async buffer 'counts' are read before we
         * attempt to read data from the buffer
         */
        smp_rmb();

        return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice.  The amount reserved is limited to the space available.  The
 * reader can read from the reserved space and then free it.  A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
                                   unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int available;

        available = async->munge_count - async->buf_read_alloc_count;
        if (nbytes > available)
                nbytes = available;

        async->buf_read_alloc_count += nbytes;

        /*
         * ensure the async buffer 'counts' are read before we
         * attempt to read data from the read-alloc'ed buffer space
         */
        smp_rmb();

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
        return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice.  The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
                                  unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int allocated;

        /*
         * ensure data has been read out of buffer before
         * the async read count is incremented
         */
        smp_mb();

        allocated = comedi_buf_read_n_allocated(async);
        if (nbytes > allocated)
                nbytes = allocated;

        async->buf_read_count += nbytes;
        async->buf_read_ptr += nbytes;
        async->buf_read_ptr %= async->prealloc_bufsz;
        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
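
/*
 * Illustrative sketch, assumed in-kernel consumer and not part of this file:
 * the read side mirrors the write side.  A consumer checks how much munged
 * data is available, reserves it with comedi_buf_read_alloc(), copies it out
 * starting at buf_read_ptr, and then releases it with comedi_buf_read_free()
 * so the writer can reuse the space.  The helper name below is hypothetical;
 * the second memcpy() handles data that wraps at the end of the ring.
 *
 *      static unsigned int mydrv_pull_block(struct comedi_subdevice *s,
 *                                           void *dest, unsigned int nbytes)
 *      {
 *              struct comedi_async *async = s->async;
 *              unsigned int n = min(nbytes, comedi_buf_read_n_available(s));
 *              unsigned int rp, chunk;
 *
 *              n = comedi_buf_read_alloc(s, n);
 *              rp = async->buf_read_ptr;
 *              chunk = min(n, async->prealloc_bufsz - rp);
 *              memcpy(dest, async->prealloc_buf + rp, chunk);
 *              if (n > chunk)
 *                      memcpy(dest + chunk, async->prealloc_buf, n - chunk);
 *              return comedi_buf_read_free(s, n);
 *      }
 */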

static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
                                 const void *data, unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int write_ptr = async->buf_write_ptr;

        while (num_bytes) {
                unsigned int block_size;

                if (write_ptr + num_bytes > async->prealloc_bufsz)
                        block_size = async->prealloc_bufsz - write_ptr;
                else
                        block_size = num_bytes;

                memcpy(async->prealloc_buf + write_ptr, data, block_size);

                data += block_size;
                num_bytes -= block_size;

                write_ptr = 0;
        }
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
                                   void *dest, unsigned int nbytes)
{
        void *src;
        struct comedi_async *async = s->async;
        unsigned int read_ptr = async->buf_read_ptr;

        while (nbytes) {
                unsigned int block_size;

                src = async->prealloc_buf + read_ptr;

                if (nbytes >= async->prealloc_bufsz - read_ptr)
                        block_size = async->prealloc_bufsz - read_ptr;
                else
                        block_size = nbytes;

                memcpy(dest, src, block_size);
                nbytes -= block_size;
                dest += block_size;
                read_ptr = 0;
        }
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress.  If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error.  Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
                                      const void *data, unsigned int nsamples)
{
        unsigned int max_samples;
        unsigned int nbytes;

        /*
         * Make sure there is enough room in the buffer for all the samples.
         * If not, clamp the nsamples to the number that will fit, flag the
         * buffer overrun and add the samples that fit.
         */
        max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
        if (nsamples > max_samples) {
                dev_warn(s->device->class_dev, "buffer overrun\n");
                s->async->events |= COMEDI_CB_OVERFLOW;
                nsamples = max_samples;
        }

        if (nsamples == 0)
                return 0;

        nbytes = comedi_buf_write_alloc(s,
                                        comedi_samples_to_bytes(s, nsamples));
        comedi_buf_memcpy_to(s, data, nbytes);
        comedi_buf_write_free(s, nbytes);
        comedi_inc_scan_progress(s, nbytes);
        s->async->events |= COMEDI_CB_BLOCK;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
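
/*
 * Illustrative sketch, a hypothetical acquisition driver's interrupt handler
 * and not part of this file: the driver drains its hardware FIFO, pushes the
 * samples with comedi_buf_write_samples(), and calls comedi_handle_events()
 * so the event flags set above (COMEDI_CB_BLOCK, COMEDI_CB_OVERFLOW, ...)
 * are acted on.  mydrv_drain_fifo() and the buffer size are made up.
 *
 *      static irqreturn_t mydrv_interrupt(int irq, void *d)
 *      {
 *              struct comedi_device *dev = d;
 *              struct comedi_subdevice *s = dev->read_subdev;
 *              unsigned short samples[64];
 *              unsigned int nsamples = mydrv_drain_fifo(dev, samples,
 *                                                       ARRAY_SIZE(samples));
 *
 *              comedi_buf_write_samples(s, samples, nsamples);
 *              comedi_handle_events(dev, s);
 *              return IRQ_HANDLED;
 *      }
 */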

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress.  Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
                                     void *data, unsigned int nsamples)
{
        unsigned int max_samples;
        unsigned int nbytes;

        /* clamp nsamples to the number of full samples available */
        max_samples = comedi_bytes_to_samples(s,
                                              comedi_buf_read_n_available(s));
        if (nsamples > max_samples)
                nsamples = max_samples;

        if (nsamples == 0)
                return 0;

        nbytes = comedi_buf_read_alloc(s,
                                       comedi_samples_to_bytes(s, nsamples));
        comedi_buf_memcpy_from(s, data, nbytes);
        comedi_buf_read_free(s, nbytes);
        comedi_inc_scan_progress(s, nbytes);
        s->async->events |= COMEDI_CB_BLOCK;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
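
/*
 * Illustrative sketch, a hypothetical analog-output driver and not part of
 * this file: the driver pulls the next chunk of samples queued by user space
 * with comedi_buf_read_samples() and writes them to its hardware FIFO, again
 * letting comedi_handle_events() process the event flags.  mydrv_write_fifo()
 * and the chunk size are made up; note that the return value is in bytes.
 *
 *      static void mydrv_ao_refill_fifo(struct comedi_device *dev,
 *                                       struct comedi_subdevice *s)
 *      {
 *              unsigned short samples[32];
 *              unsigned int nbytes = comedi_buf_read_samples(s, samples,
 *                                                            ARRAY_SIZE(samples));
 *
 *              if (nbytes)
 *                      mydrv_write_fifo(dev, samples,
 *                                       comedi_bytes_to_samples(s, nbytes));
 *              comedi_handle_events(dev, s);
 *      }
 */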