linux/drivers/staging/comedi/comedi_buf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "comedidev.h"
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION          PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION          PAGE_KERNEL
#endif
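
/*
 * The preallocated buffer is vmap()ed below with PAGE_KERNEL_NOCACHE where
 * the architecture provides it.  The likely intent is to avoid cached kernel
 * aliases of pages that may also be DMA-coherent or mmap()ed into user
 * space; otherwise a normal cached mapping (PAGE_KERNEL) is used.
 */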

static void comedi_buf_map_kref_release(struct kref *kref)
{
        struct comedi_buf_map *bm =
                container_of(kref, struct comedi_buf_map, refcount);
        struct comedi_buf_page *buf;
        unsigned int i;

        if (bm->page_list) {
                for (i = 0; i < bm->n_pages; i++) {
                        buf = &bm->page_list[i];
                        clear_bit(PG_reserved,
                                  &(virt_to_page(buf->virt_addr)->flags));
                        if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
                                dma_free_coherent(bm->dma_hw_dev,
                                                  PAGE_SIZE,
                                                  buf->virt_addr,
                                                  buf->dma_addr);
#endif
                        } else {
                                free_page((unsigned long)buf->virt_addr);
                        }
                }
                vfree(bm->page_list);
        }
        if (bm->dma_dir != DMA_NONE)
                put_device(bm->dma_hw_dev);
        kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
                              struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_buf_map *bm;
        unsigned long flags;

        if (async->prealloc_buf) {
                vunmap(async->prealloc_buf);
                async->prealloc_buf = NULL;
                async->prealloc_bufsz = 0;
        }

        spin_lock_irqsave(&s->spin_lock, flags);
        bm = async->buf_map;
        async->buf_map = NULL;
        spin_unlock_irqrestore(&s->spin_lock, flags);
        comedi_buf_map_put(bm);
}

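/*
 * Allocate the buffer for subdevice @s as @n_pages individual pages.  Each
 * page is either a DMA-coherent page (when the subdevice does DMA) or an
 * ordinary zeroed page, is marked PG_reserved, and is recorded in
 * bm->page_list.  If every page is obtained, the pages are vmap()ed into one
 * virtually contiguous block, async->prealloc_buf, which the rest of this
 * file treats as a ring buffer.
 */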
static void __comedi_buf_alloc(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               unsigned int n_pages)
{
        struct comedi_async *async = s->async;
        struct page **pages = NULL;
        struct comedi_buf_map *bm;
        struct comedi_buf_page *buf;
        unsigned long flags;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
                dev_err(dev->class_dev,
                        "dma buffer allocation not supported\n");
                return;
        }

        bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
        if (!bm)
                return;

        kref_init(&bm->refcount);
        spin_lock_irqsave(&s->spin_lock, flags);
        async->buf_map = bm;
        spin_unlock_irqrestore(&s->spin_lock, flags);
        bm->dma_dir = s->async_dma_dir;
        if (bm->dma_dir != DMA_NONE)
                /* Need ref to hardware device to free buffer later. */
                bm->dma_hw_dev = get_device(dev->hw_dev);

        bm->page_list = vzalloc(sizeof(*buf) * n_pages);
        if (bm->page_list)
                pages = vmalloc(sizeof(struct page *) * n_pages);

        if (!pages)
                return;

        for (i = 0; i < n_pages; i++) {
                buf = &bm->page_list[i];
                if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
                        buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
                                                            PAGE_SIZE,
                                                            &buf->dma_addr,
                                                            GFP_KERNEL |
                                                            __GFP_COMP);
#else
                        break;
#endif
                else
                        buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
                if (!buf->virt_addr)
                        break;

                set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));

                pages[i] = virt_to_page(buf->virt_addr);
        }
        spin_lock_irqsave(&s->spin_lock, flags);
        bm->n_pages = i;
        spin_unlock_irqrestore(&s->spin_lock, flags);

        /* vmap the prealloc_buf if all the pages were allocated */
        if (i == n_pages)
                async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
                                           COMEDI_PAGE_PROTECTION);

        vfree(pages);
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
        if (bm)
                kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
        if (bm)
                return kref_put(&bm->refcount, comedi_buf_map_kref_release);
        return 1;
}

/* helper for the "access" vm operation of the mmap'ed buffer */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
                          void *buf, int len, int write)
{
        unsigned int pgoff = offset_in_page(offset);
        unsigned long pg = offset >> PAGE_SHIFT;
        int done = 0;

        while (done < len && pg < bm->n_pages) {
                int l = min_t(int, len - done, PAGE_SIZE - pgoff);
                void *b = bm->page_list[pg].virt_addr + pgoff;

                if (write)
                        memcpy(b, buf, l);
                else
                        memcpy(buf, b, l);
                buf += l;
                done += l;
                pg++;
                pgoff = 0;
        }
        return done;
}

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_buf_map *bm = NULL;
        unsigned long flags;

        if (!async)
                return NULL;

        spin_lock_irqsave(&s->spin_lock, flags);
        bm = async->buf_map;
        /* only want it if buffer pages allocated */
        if (bm && bm->n_pages)
                comedi_buf_map_get(bm);
        else
                bm = NULL;
        spin_unlock_irqrestore(&s->spin_lock, flags);

        return bm;
}

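/*
 * The subdevice itself holds one reference on the buffer map; additional
 * references normally come from live mmap()ings of the buffer, so a refcount
 * greater than one is treated as "the buffer is mmapped".
 */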
bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
        struct comedi_buf_map *bm = s->async->buf_map;

        return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
                     unsigned long new_size)
{
        struct comedi_async *async = s->async;

        lockdep_assert_held(&dev->mutex);

        /* Round up new_size to multiple of PAGE_SIZE */
        new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

        /* if no change is required, do nothing */
        if (async->prealloc_buf && async->prealloc_bufsz == new_size)
                return 0;

        /* deallocate old buffer */
        __comedi_buf_free(dev, s);

        /* allocate new buffer */
        if (new_size) {
                unsigned int n_pages = new_size >> PAGE_SHIFT;

                __comedi_buf_alloc(dev, s, n_pages);

                if (!async->prealloc_buf) {
                        /* allocation failed */
                        __comedi_buf_free(dev, s);
                        return -ENOMEM;
                }
        }
        async->prealloc_bufsz = new_size;

        return 0;
}

void comedi_buf_reset(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;

        async->buf_write_alloc_count = 0;
        async->buf_write_count = 0;
        async->buf_read_alloc_count = 0;
        async->buf_read_count = 0;

        async->buf_write_ptr = 0;
        async->buf_read_ptr = 0;

        async->cur_chan = 0;
        async->scans_done = 0;
        async->scan_progress = 0;
        async->munge_chan = 0;
        async->munge_count = 0;
        async->munge_ptr = 0;

        async->events = 0;
}

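/*
 * Buffer positions are tracked with free-running unsigned byte counters,
 * ordered (in the modular, wraparound sense) as:
 *
 *        buf_read_count <= buf_read_alloc_count <= munge_count
 *                       <= buf_write_count <= buf_write_alloc_count
 *
 * Only differences between counters are ever used, so the arithmetic below
 * stays correct when the counters wrap around.  For example, the space not
 * yet reserved for writing is the buffer size minus the data that has been
 * write-allocated but not yet read:
 *
 *        (buf_read_count + prealloc_bufsz) - buf_write_alloc_count
 */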
static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

        return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

        return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice.  The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
                                    unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int unalloc = comedi_buf_write_n_unalloc(s);

        if (nbytes > unalloc)
                nbytes = unalloc;

        async->buf_write_alloc_count += nbytes;

        /*
         * ensure the async buffer 'counts' are read and updated
         * before we write data to the write-alloc'ed buffer space
         */
        smp_mb();

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
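
/*
 * A writer (typically a device driver's interrupt handler) uses these calls
 * in sequence: reserve space with comedi_buf_write_alloc(), copy sample data
 * into the reserved region of async->prealloc_buf starting at
 * async->buf_write_ptr (wrapping at async->prealloc_bufsz), then publish it
 * with comedi_buf_write_free().  comedi_buf_write_samples() further down is
 * a convenience wrapper around exactly this sequence.
 */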

/*
 * Munging is applied to the data by the COMEDI core as it passes between
 * kernel and user space, converting the driver's raw hardware format into
 * the sample format expected by COMEDI (skipped when CMDF_RAWDATA is set).
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
                                     unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int count = 0;
        const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

        if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
                async->munge_count += num_bytes;
                count = num_bytes;
        } else {
                /* don't munge partial samples */
                num_bytes -= num_bytes % num_sample_bytes;
                while (count < num_bytes) {
                        int block_size = num_bytes - count;
                        unsigned int buf_end;

                        buf_end = async->prealloc_bufsz - async->munge_ptr;
                        if (block_size > buf_end)
                                block_size = buf_end;

                        s->munge(s->device, s,
                                 async->prealloc_buf + async->munge_ptr,
                                 block_size, async->munge_chan);

                        /*
                         * ensure data is munged in buffer before the
                         * async buffer munge_count is incremented
                         */
                        smp_wmb();

                        async->munge_chan += block_size / num_sample_bytes;
                        async->munge_chan %= async->cmd.chanlist_len;
                        async->munge_count += block_size;
                        async->munge_ptr += block_size;
                        async->munge_ptr %= async->prealloc_bufsz;
                        count += block_size;
                }
        }

        return count;
}

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;

        return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice.  The amount of
 * space freed is limited to the amount that was reserved.  The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here.  The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
                                   unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int allocated = comedi_buf_write_n_allocated(s);

        if (nbytes > allocated)
                nbytes = allocated;

        async->buf_write_count += nbytes;
        async->buf_write_ptr += nbytes;
        comedi_buf_munge(s, async->buf_write_count - async->munge_count);
        if (async->buf_write_ptr >= async->prealloc_bufsz)
                async->buf_write_ptr %= async->prealloc_bufsz;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice.  The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int num_bytes;

        if (!async)
                return 0;

        num_bytes = async->munge_count - async->buf_read_count;

        /*
         * ensure the async buffer 'counts' are read before we
         * attempt to read data from the buffer
         */
        smp_rmb();

        return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice.  The amount reserved is limited to the space available.  The
 * reader can read from the reserved space and then free it.  A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
                                   unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int available;

        available = async->munge_count - async->buf_read_alloc_count;
        if (nbytes > available)
                nbytes = available;

        async->buf_read_alloc_count += nbytes;

        /*
         * ensure the async buffer 'counts' are read before we
         * attempt to read data from the read-alloc'ed buffer space
         */
        smp_rmb();

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
        return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice.  The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
                                  unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int allocated;

        /*
         * ensure data has been read out of buffer before
         * the async read count is incremented
         */
        smp_mb();

        allocated = comedi_buf_read_n_allocated(async);
        if (nbytes > allocated)
                nbytes = allocated;

        async->buf_read_count += nbytes;
        async->buf_read_ptr += nbytes;
        async->buf_read_ptr %= async->prealloc_bufsz;
        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
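
/*
 * A reader mirrors the writer sequence: check comedi_buf_read_n_available(),
 * reserve data with comedi_buf_read_alloc(), copy it out of
 * async->prealloc_buf starting at async->buf_read_ptr (wrapping at
 * async->prealloc_bufsz), then release the space with comedi_buf_read_free().
 * comedi_buf_read_samples() below is a convenience wrapper around this
 * sequence.
 */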

static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
                                 const void *data, unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int write_ptr = async->buf_write_ptr;

        while (num_bytes) {
                unsigned int block_size;

                if (write_ptr + num_bytes > async->prealloc_bufsz)
                        block_size = async->prealloc_bufsz - write_ptr;
                else
                        block_size = num_bytes;

                memcpy(async->prealloc_buf + write_ptr, data, block_size);

                data += block_size;
                num_bytes -= block_size;

                write_ptr = 0;
        }
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
                                   void *dest, unsigned int nbytes)
{
        void *src;
        struct comedi_async *async = s->async;
        unsigned int read_ptr = async->buf_read_ptr;

        while (nbytes) {
                unsigned int block_size;

                src = async->prealloc_buf + read_ptr;

                if (nbytes >= async->prealloc_bufsz - read_ptr)
                        block_size = async->prealloc_bufsz - read_ptr;
                else
                        block_size = nbytes;

                memcpy(dest, src, block_size);
                nbytes -= block_size;
                dest += block_size;
                read_ptr = 0;
        }
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress.  If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error.  Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
                                      const void *data, unsigned int nsamples)
{
        unsigned int max_samples;
        unsigned int nbytes;

        /*
         * Make sure there is enough room in the buffer for all the samples.
         * If not, clamp the nsamples to the number that will fit, flag the
         * buffer overrun and add the samples that fit.
         */
        max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
        if (nsamples > max_samples) {
                dev_warn(s->device->class_dev, "buffer overrun\n");
                s->async->events |= COMEDI_CB_OVERFLOW;
                nsamples = max_samples;
        }

        if (nsamples == 0)
                return 0;

        nbytes = comedi_buf_write_alloc(s,
                                        comedi_samples_to_bytes(s, nsamples));
        comedi_buf_memcpy_to(s, data, nbytes);
        comedi_buf_write_free(s, nbytes);
        comedi_inc_scan_progress(s, nbytes);
        s->async->events |= COMEDI_CB_BLOCK;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
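
/*
 * Typical use (an illustrative sketch, not code from this file): a driver's
 * interrupt handler drains its hardware FIFO into a local array and pushes
 * the samples into the COMEDI buffer.  Here mydrv_read_fifo() and 'samples'
 * are hypothetical driver-side names:
 *
 *        unsigned int n = mydrv_read_fifo(dev, samples);
 *
 *        comedi_buf_write_samples(s, samples, n);
 *        comedi_handle_events(dev, s);
 *
 * comedi_handle_events() then processes the event flags set here, waking any
 * waiting reader on %COMEDI_CB_BLOCK and cancelling the acquisition on error
 * or end-of-acquisition events such as %COMEDI_CB_OVERFLOW.
 */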

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress.  Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
                                     void *data, unsigned int nsamples)
{
        unsigned int max_samples;
        unsigned int nbytes;

        /* clamp nsamples to the number of full samples available */
        max_samples = comedi_bytes_to_samples(s,
                                              comedi_buf_read_n_available(s));
        if (nsamples > max_samples)
                nsamples = max_samples;

        if (nsamples == 0)
                return 0;

        nbytes = comedi_buf_read_alloc(s,
                                       comedi_samples_to_bytes(s, nsamples));
        comedi_buf_memcpy_from(s, data, nbytes);
        comedi_buf_read_free(s, nbytes);
        comedi_inc_scan_progress(s, nbytes);
        s->async->events |= COMEDI_CB_BLOCK;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
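
/*
 * The mirror-image sketch for an analog output stream (again illustrative;
 * mydrv_fifo_space() and mydrv_write_fifo() are hypothetical helpers): the
 * driver's interrupt handler pulls samples out of the COMEDI buffer and
 * feeds them to the hardware.
 *
 *        unsigned short samples[32];
 *        unsigned int n = min(mydrv_fifo_space(dev), 32U);
 *        unsigned int nbytes;
 *
 *        nbytes = comedi_buf_read_samples(s, samples, n);
 *        mydrv_write_fifo(dev, samples, comedi_bytes_to_samples(s, nbytes));
 *        comedi_handle_events(dev, s);
 */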