/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
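
/*
 * Illustrative note (not part of the original sources): in kernels that
 * carry the matching sysctl table entries, these two limits are exposed
 * as /proc/sys/fs/pipe-user-pages-soft and /proc/sys/fs/pipe-user-pages-hard;
 * a value of 0 disables the corresponding check (see
 * too_many_pipe_buffers_soft/hard() below).
 */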

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
        if (pipe->files)
                mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
        /*
         * pipe_lock() nests non-pipe inode locks (for writing to a file)
         */
        pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
        if (pipe->files)
                mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
        mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
        mutex_unlock(&pipe->mutex);
}

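/*
 * Lock two pipes in a fixed (address) order, so that two tasks
 * double-locking the same pair cannot deadlock on each other
 * (the classic ABBA problem).
 */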
void pipe_double_lock(struct pipe_inode_info *pipe1,
                      struct pipe_inode_info *pipe2)
{
        BUG_ON(pipe1 == pipe2);

        if (pipe1 < pipe2) {
                pipe_lock_nested(pipe1, I_MUTEX_PARENT);
                pipe_lock_nested(pipe2, I_MUTEX_CHILD);
        } else {
                pipe_lock_nested(pipe2, I_MUTEX_PARENT);
                pipe_lock_nested(pipe1, I_MUTEX_CHILD);
        }
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
        DEFINE_WAIT(wait);

        /*
         * Pipes are system-local resources, so sleeping on them
         * is considered a noninteractive wait:
         */
        prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
        pipe_unlock(pipe);
        schedule();
        finish_wait(&pipe->wait, &wait);
        pipe_lock(pipe);
}

static int
pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
                        size_t *remaining, int atomic)
{
        unsigned long copy;

        while (*remaining > 0) {
                while (!iov->iov_len)
                        iov++;
                copy = min_t(unsigned long, *remaining, iov->iov_len);

                if (atomic) {
                        if (__copy_from_user_inatomic(addr + *offset,
                                                      iov->iov_base, copy))
                                return -EFAULT;
                } else {
                        if (copy_from_user(addr + *offset,
                                           iov->iov_base, copy))
                                return -EFAULT;
                }
                *offset += copy;
                *remaining -= copy;
                iov->iov_base += copy;
                iov->iov_len -= copy;
        }
        return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
                      size_t *remaining, int atomic)
{
        unsigned long copy;

        while (*remaining > 0) {
                while (!iov->iov_len)
                        iov++;
                copy = min_t(unsigned long, *remaining, iov->iov_len);

                if (atomic) {
                        if (__copy_to_user_inatomic(iov->iov_base,
                                                    addr + *offset, copy))
                                return -EFAULT;
                } else {
                        if (copy_to_user(iov->iov_base,
                                         addr + *offset, copy))
                                return -EFAULT;
                }
                *offset += copy;
                *remaining -= copy;
                iov->iov_base += copy;
                iov->iov_len -= copy;
        }
        return 0;
}

/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
        while (!iov->iov_len)
                iov++;

        while (len > 0) {
                unsigned long this_len;

                this_len = min_t(unsigned long, len, iov->iov_len);
                if (fault_in_pages_writeable(iov->iov_base, this_len))
                        break;

                len -= this_len;
                iov++;
        }

        return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
        while (!iov->iov_len)
                iov++;

        while (len > 0) {
                unsigned long this_len;

                this_len = min_t(unsigned long, len, iov->iov_len);
                fault_in_pages_readable(iov->iov_base, this_len);
                len -= this_len;
                iov++;
        }
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * If nobody else uses this page, and we don't already have a
         * temporary page, let's keep track of it as a one-deep
         * allocation cache. (Otherwise just release our reference to it)
         */
        if (page_count(page) == 1 && !pipe->tmp_page)
                pipe->tmp_page = page;
        else
                page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer that should be mapped
 * @atomic:     whether to use an atomic map
 *
 * Description:
 *      This function returns a kernel virtual address mapping for the
 *      pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 *      and the caller has to be careful not to fault before calling
 *      the unmap function.
 *
 *      Note that this function calls kmap_atomic() if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
                           struct pipe_buffer *buf, int atomic)
{
        if (atomic) {
                buf->flags |= PIPE_BUF_FLAG_ATOMIC;
                return kmap_atomic(buf->page);
        }

        return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer that should be unmapped
 * @map_data:   the data that the mapping function returned
 *
 * Description:
 *      This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
                            struct pipe_buffer *buf, void *map_data)
{
        if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
                buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
                kunmap_atomic(map_data);
        } else
                kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to attempt to steal
 *
 * Description:
 *      This function attempts to steal the &struct page attached to
 *      @buf. If successful, this function returns 0 and returns with
 *      the page locked. The caller may then reuse the page for whatever
 *      he wishes; the typical use is insertion into a different file
 *      page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
                           struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * A reference of one is golden, that means that the owner of this
         * page is the only one holding a reference to it. lock the page
         * and return OK.
         */
        if (page_count(page) == 1) {
                lock_page(page);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to get a reference to
 *
 * Description:
 *      This function grabs an extra reference to @buf. It's used in
 *      the tee() system call, when we duplicate the buffers in one
 *      pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
        page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:       the pipe that the buffer belongs to
 * @buf:        the buffer to confirm
 *
 * Description:
 *      This function does nothing, because the generic pipe code uses
 *      pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
                             struct pipe_buffer *buf)
{
        return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to put a reference to
 *
 * Description:
 *      This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
           unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        int do_wakeup;
        ssize_t ret;
        struct iovec *iov = (struct iovec *)_iov;
        size_t total_len;

        total_len = iov_length(iov, nr_segs);
        /* Null read succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);
        for (;;) {
                int bufs = pipe->nrbufs;
                if (bufs) {
                        int curbuf = pipe->curbuf;
                        struct pipe_buffer *buf = pipe->bufs + curbuf;
                        const struct pipe_buf_operations *ops = buf->ops;
                        void *addr;
                        size_t chars = buf->len, remaining;
                        int error, atomic, offset;

                        if (chars > total_len)
                                chars = total_len;

                        error = ops->confirm(pipe, buf);
                        if (error) {
                                if (!ret)
                                        ret = error;
                                break;
                        }

                        atomic = IS_ENABLED(CONFIG_HIGHMEM) && !iov_fault_in_pages_write(iov, chars);
                        remaining = chars;
                        offset = buf->offset;
redo:
                        addr = ops->map(pipe, buf, atomic);
                        error = pipe_iov_copy_to_user(iov, addr, &offset,
                                                      &remaining, atomic);
                        ops->unmap(pipe, buf, addr);
                        if (unlikely(error)) {
                                /*
                                 * Just retry with the slow path if we failed.
                                 */
                                if (atomic) {
                                        atomic = 0;
                                        goto redo;
                                }
                                if (!ret)
                                        ret = error;
                                break;
                        }
                        ret += chars;
                        buf->offset = offset;
                        buf->len -= chars;

                        /* Was it a packet buffer? Clean up and exit */
                        if (buf->flags & PIPE_BUF_FLAG_PACKET) {
                                total_len = chars;
                                buf->len = 0;
                        }

                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                curbuf = (curbuf + 1) & (pipe->buffers - 1);
                                pipe->curbuf = curbuf;
                                pipe->nrbufs = --bufs;
                                do_wakeup = 1;
                        }
                        total_len -= chars;
                        if (!total_len)
                                break;  /* common path: read succeeded */
                }
                if (bufs)       /* More to do? */
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        /* syscall merging: Usually we must not sleep
                         * if O_NONBLOCK is set, or if we got some data.
                         * But if a writer sleeps in kernel space, then
                         * we can wait for that data without violating POSIX.
                         */
                        if (ret)
                                break;
                        if (filp->f_flags & O_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                }
                pipe_wait(pipe);
        }
        __pipe_unlock(pipe);

        /* Signal writers asynchronously that there is more room. */
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        if (ret > 0)
                file_accessed(filp);
        return ret;
}

static inline int is_packetized(struct file *file)
{
        return (file->f_flags & O_DIRECT) != 0;
}
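
/*
 * Illustrative userspace sketch (not part of the kernel sources): a pipe
 * created with O_DIRECT operates in "packet" mode, as handled by
 * PIPE_BUF_FLAG_PACKET in pipe_read() above. Each write() is delivered as
 * a discrete packet and a large read() returns at most one packet.
 * A minimal demonstration:
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fds[2];
 *      char buf[4096];
 *
 *      pipe2(fds, O_DIRECT);
 *      write(fds[1], "ab", 2);                 // packet 1
 *      write(fds[1], "cd", 2);                 // packet 2
 *      read(fds[0], buf, sizeof(buf));         // returns 2 ("ab"), not 4
 */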

static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
            unsigned long nr_segs, loff_t ppos)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        ssize_t ret;
        int do_wakeup;
        struct iovec *iov = (struct iovec *)_iov;
        size_t total_len;
        ssize_t chars;

        total_len = iov_length(iov, nr_segs);
        /* Null write succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                ret = -EPIPE;
                goto out;
        }

        /* We try to merge small writes */
        chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
        if (pipe->nrbufs && chars != 0) {
                int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
                                                        (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + lastbuf;
                const struct pipe_buf_operations *ops = buf->ops;
                int offset = buf->offset + buf->len;

                if (ops->can_merge && offset + chars <= PAGE_SIZE) {
                        int error, atomic = IS_ENABLED(CONFIG_HIGHMEM);
                        void *addr;
                        size_t remaining = chars;

                        error = ops->confirm(pipe, buf);
                        if (error)
                                goto out;

                        if (IS_ENABLED(CONFIG_HIGHMEM))
                                iov_fault_in_pages_read(iov, chars);
redo1:
                        addr = ops->map(pipe, buf, atomic);
                        error = pipe_iov_copy_from_user(addr, &offset, iov,
                                                        &remaining, atomic);
                        ops->unmap(pipe, buf, addr);
                        ret = error;
                        do_wakeup = 1;
                        if (error) {
                                if (atomic) {
                                        atomic = 0;
                                        goto redo1;
                                }
                                goto out;
                        }
                        buf->len += chars;
                        total_len -= chars;
                        ret = chars;
                        if (!total_len)
                                goto out;
                }
        }

        for (;;) {
                int bufs;

                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }
                bufs = pipe->nrbufs;
                if (bufs < pipe->buffers) {
                        int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;
                        struct page *page = pipe->tmp_page;
                        char *src;
                        int error, atomic = IS_ENABLED(CONFIG_HIGHMEM);
                        int offset = 0;
                        size_t remaining;

                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER);
                                if (unlikely(!page)) {
                                        ret = ret ? : -ENOMEM;
                                        break;
                                }
                                pipe->tmp_page = page;
                        }
                        /* Always wake up, even if the copy fails. Otherwise
                         * we lock up (O_NONBLOCK-)readers that sleep due to
                         * syscall merging.
                         * FIXME! Is this really true?
                         */
                        do_wakeup = 1;
                        chars = PAGE_SIZE;
                        if (chars > total_len)
                                chars = total_len;

                        if (IS_ENABLED(CONFIG_HIGHMEM))
                                iov_fault_in_pages_read(iov, chars);
                        remaining = chars;
redo2:
                        if (atomic)
                                src = kmap_atomic(page);
                        else
                                src = kmap(page);

                        error = pipe_iov_copy_from_user(src, &offset, iov,
                                                        &remaining, atomic);
                        if (atomic)
                                kunmap_atomic(src);
                        else
                                kunmap(page);

                        if (unlikely(error)) {
                                if (atomic) {
                                        atomic = 0;
                                        goto redo2;
                                }
                                if (!ret)
                                        ret = error;
                                break;
                        }
                        ret += chars;

                        /* Insert it into the buffer array */
                        buf->page = page;
                        buf->ops = &anon_pipe_buf_ops;
                        buf->offset = 0;
                        buf->len = chars;
                        buf->flags = 0;
                        if (is_packetized(filp)) {
                                buf->ops = &packet_pipe_buf_ops;
                                buf->flags = PIPE_BUF_FLAG_PACKET;
                        }
                        pipe->nrbufs = ++bufs;
                        pipe->tmp_page = NULL;

                        total_len -= chars;
                        if (!total_len)
                                break;
                }
                if (bufs < pipe->buffers)
                        continue;
                if (filp->f_flags & O_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }
                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }
out:
        __pipe_unlock(pipe);
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
                int err = file_update_time(filp);
                if (err)
                        ret = err;
                sb_end_write(file_inode(filp)->i_sb);
        }
        return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int count, buf, nrbufs;

        switch (cmd) {
                case FIONREAD:
                        __pipe_lock(pipe);
                        count = 0;
                        buf = pipe->curbuf;
                        nrbufs = pipe->nrbufs;
                        while (--nrbufs >= 0) {
                                count += pipe->bufs[buf].len;
                                buf = (buf+1) & (pipe->buffers - 1);
                        }
                        __pipe_unlock(pipe);

                        return put_user(count, (int __user *)arg);
                default:
                        return -ENOIOCTLCMD;
        }
}
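
/*
 * Illustrative userspace sketch (not part of the kernel sources):
 * FIONREAD reports how many bytes are currently buffered in the pipe,
 * which is exactly what the loop above computes:
 *
 *      #include <sys/ioctl.h>
 *      #include <unistd.h>
 *
 *      int fds[2], avail;
 *
 *      pipe(fds);
 *      write(fds[1], "hello", 5);
 *      ioctl(fds[0], FIONREAD, &avail);        // avail == 5
 */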

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
        unsigned int mask;
        struct pipe_inode_info *pipe = filp->private_data;
        int nrbufs;

        poll_wait(filp, &pipe->wait, wait);

        /* Reading only -- no need for acquiring the semaphore.  */
        nrbufs = pipe->nrbufs;
        mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
                        mask |= POLLHUP;
        }

        if (filp->f_mode & FMODE_WRITE) {
                mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
                /*
                 * Most Unices do not set POLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
                if (!pipe->readers)
                        mask |= POLLERR;
        }

        return mask;
}
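
/*
 * Illustrative userspace sketch (not part of the kernel sources): polling
 * the read end of a pipe (fds[0] from an earlier pipe() call) blocks until
 * data is buffered (POLLIN) or the last writer closes (POLLHUP, subject to
 * the w_counter check above):
 *
 *      #include <poll.h>
 *
 *      struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *
 *      poll(&pfd, 1, -1);              // wait with no timeout
 *      if (pfd.revents & POLLHUP)
 *              ;                       // all writers are gone
 */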

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
        int kill = 0;

        spin_lock(&inode->i_lock);
        if (!--pipe->files) {
                inode->i_pipe = NULL;
                kill = 1;
        }
        spin_unlock(&inode->i_lock);

        if (kill)
                free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
                pipe->readers--;
        if (file->f_mode & FMODE_WRITE)
                pipe->writers--;

        if (pipe->readers || pipe->writers) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int retval = 0;

        __pipe_lock(pipe);
        if (filp->f_mode & FMODE_READ)
                retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
        if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
                retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
                if (retval < 0 && (filp->f_mode & FMODE_READ))
                        /* this can happen only if on == T */
                        fasync_helper(-1, filp, 0, &pipe->fasync_readers);
        }
        __pipe_unlock(pipe);
        return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
        return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
        return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
        return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
}

struct pipe_inode_info *alloc_pipe_info(void)
{
        struct pipe_inode_info *pipe;
        unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
        struct user_struct *user = get_current_user();
        unsigned long user_bufs;

        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
        if (pipe == NULL)
                goto out_free_uid;

        if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
                pipe_bufs = pipe_max_size >> PAGE_SHIFT;

        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

        if (too_many_pipe_buffers_soft(user_bufs)) {
                user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
                pipe_bufs = 1;
        }

        if (too_many_pipe_buffers_hard(user_bufs))
                goto out_revert_acct;

        pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);

        if (pipe->bufs) {
                init_waitqueue_head(&pipe->wait);
                pipe->r_counter = pipe->w_counter = 1;
                pipe->buffers = pipe_bufs;
                pipe->user = user;
                mutex_init(&pipe->mutex);
                return pipe;
        }

out_revert_acct:
        (void) account_pipe_buffers(user, pipe_bufs, 0);
        kfree(pipe);
out_free_uid:
        free_uid(user);
        return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
        int i;

        (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
        free_uid(pipe->user);
        for (i = 0; i < pipe->buffers; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
                if (buf->ops)
                        buf->ops->release(pipe, buf);
        }
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
        kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
                                dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
        .d_dname        = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
        struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
        struct pipe_inode_info *pipe;

        if (!inode)
                goto fail_inode;

        inode->i_ino = get_next_ino();

        pipe = alloc_pipe_info();
        if (!pipe)
                goto fail_iput;

        inode->i_pipe = pipe;
        pipe->files = 2;
        pipe->readers = pipe->writers = 1;
        inode->i_fop = &pipefifo_fops;

        /*
         * Mark the inode dirty from the very beginning,
         * that way it will never be moved to the dirty
         * list because "mark_inode_dirty()" will think
         * that it already _is_ on the dirty list.
         */
        inode->i_state = I_DIRTY;
        inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

        return inode;

fail_iput:
        iput(inode);

fail_inode:
        return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
        int err;
        struct inode *inode = get_pipe_inode();
        struct file *f;
        struct path path;
        static struct qstr name = { .name = "" };

        if (!inode)
                return -ENFILE;

        err = -ENOMEM;
        path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
        if (!path.dentry)
                goto err_inode;
        path.mnt = mntget(pipe_mnt);

        d_instantiate(path.dentry, inode);

        err = -ENFILE;
        f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
        if (IS_ERR(f))
                goto err_dentry;

        f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
        f->private_data = inode->i_pipe;

        res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
        if (IS_ERR(res[0]))
                goto err_file;

        path_get(&path);
        res[0]->private_data = inode->i_pipe;
        res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
        res[1] = f;
        return 0;

err_file:
        put_filp(f);
err_dentry:
        free_pipe_info(inode->i_pipe);
        path_put(&path);
        return err;

err_inode:
        free_pipe_info(inode->i_pipe);
        iput(inode);
        return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
        int error;
        int fdw, fdr;

        if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
                return -EINVAL;

        error = create_pipe_files(files, flags);
        if (error)
                return error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_read_pipe;
        fdr = error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_fdr;
        fdw = error;

        audit_fd_pair(fdr, fdw);
        fd[0] = fdr;
        fd[1] = fdw;
        return 0;

 err_fdr:
        put_unused_fd(fdr);
 err_read_pipe:
        fput(files[0]);
        fput(files[1]);
        return error;
}

int do_pipe_flags(int *fd, int flags)
{
        struct file *files[2];
        int error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                fd_install(fd[0], files[0]);
                fd_install(fd[1], files[1]);
        }
        return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
        struct file *files[2];
        int fd[2];
        int error;

        error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
                        fput(files[0]);
                        fput(files[1]);
                        put_unused_fd(fd[0]);
                        put_unused_fd(fd[1]);
                        error = -EFAULT;
                } else {
                        fd_install(fd[0], files[0]);
                        fd_install(fd[1], files[1]);
                }
        }
        return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
        return sys_pipe2(fildes, 0);
}
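
/*
 * Illustrative userspace sketch (not part of the kernel sources): the two
 * syscalls above back the pipe(2)/pipe2(2) wrappers. fd[0] is the read end,
 * fd[1] the write end, and pipe2() accepts exactly the flags checked in
 * __do_pipe_flags():
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fds[2];
 *
 *      pipe2(fds, O_CLOEXEC | O_NONBLOCK);
 *      write(fds[1], "hi", 2);
 *      // read(fds[0], ...) would now return the two bytes
 */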

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
        int cur = *cnt;

        while (cur == *cnt) {
                pipe_wait(pipe);
                if (signal_pending(current))
                        break;
        }
        return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
        wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
        struct pipe_inode_info *pipe;
        bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
        int ret;

        filp->f_version = 0;

        spin_lock(&inode->i_lock);
        if (inode->i_pipe) {
                pipe = inode->i_pipe;
                pipe->files++;
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                pipe = alloc_pipe_info();
                if (!pipe)
                        return -ENOMEM;
                pipe->files = 1;
                spin_lock(&inode->i_lock);
                if (unlikely(inode->i_pipe)) {
                        inode->i_pipe->files++;
                        spin_unlock(&inode->i_lock);
                        free_pipe_info(pipe);
                        pipe = inode->i_pipe;
                } else {
                        inode->i_pipe = pipe;
                        spin_unlock(&inode->i_lock);
                }
        }
        filp->private_data = pipe;
        /* OK, we have a pipe and it's pinned down */

        __pipe_lock(pipe);

        /* We can only do regular read/write on fifos */
        filp->f_mode &= (FMODE_READ | FMODE_WRITE);

        switch (filp->f_mode) {
        case FMODE_READ:
        /*
         *  O_RDONLY
         *  POSIX.1 says that O_NONBLOCK means return with the FIFO
         *  opened, even when there is no process writing the FIFO.
         */
                pipe->r_counter++;
                if (pipe->readers++ == 0)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->writers) {
                        if ((filp->f_flags & O_NONBLOCK)) {
                                /* suppress POLLHUP until we have
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
                                if (wait_for_partner(pipe, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
                break;

        case FMODE_WRITE:
        /*
         *  O_WRONLY
         *  POSIX.1 says that O_NONBLOCK means return -1 with
         *  errno=ENXIO when there is no process reading the FIFO.
         */
                ret = -ENXIO;
                if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
                        goto err;

                pipe->w_counter++;
                if (!pipe->writers++)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->readers) {
                        if (wait_for_partner(pipe, &pipe->r_counter))
                                goto err_wr;
                }
                break;

        case FMODE_READ | FMODE_WRITE:
        /*
         *  O_RDWR
         *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
         *  This implementation will NEVER block on a O_RDWR open, since
         *  the process can at least talk to itself.
         */

                pipe->readers++;
                pipe->writers++;
                pipe->r_counter++;
                pipe->w_counter++;
                if (pipe->readers == 1 || pipe->writers == 1)
                        wake_up_partner(pipe);
                break;

        default:
                ret = -EINVAL;
                goto err;
        }

        /* Ok! */
        __pipe_unlock(pipe);
        return 0;

err_rd:
        if (!--pipe->readers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err_wr:
        if (!--pipe->writers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err:
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return ret;
}
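
/*
 * Illustrative userspace sketch (not part of the kernel sources): the FIFO
 * open rules above are visible from userspace. A blocking O_WRONLY open
 * waits for a reader, while O_WRONLY | O_NONBLOCK fails with ENXIO when no
 * reader exists yet:
 *
 *      #include <errno.h>
 *      #include <fcntl.h>
 *      #include <sys/stat.h>
 *
 *      mkfifo("/tmp/f", 0600);
 *      int fd = open("/tmp/f", O_WRONLY | O_NONBLOCK);
 *      // fd == -1 && errno == ENXIO until a process opens it for reading
 */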

const struct file_operations pipefifo_fops = {
        .open           = fifo_open,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = pipe_read,
        .write          = do_sync_write,
        .aio_write      = pipe_write,
        .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
        unsigned long nr_pages;

        if (size < pipe_min_size)
                size = pipe_min_size;

        nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (nr_pages == 0)
                return 0;

        return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
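
/*
 * Worked example (added for illustration, assuming PAGE_SIZE == 4096):
 * round_pipe_size(100)   -> clamped to pipe_min_size -> 1 page -> 4096
 * round_pipe_size(20000) -> 5 pages -> rounded up to 8 pages   -> 32768
 */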

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
        struct pipe_buffer *bufs;
        unsigned int size, nr_pages;
        unsigned long user_bufs;
        long ret = 0;

        size = round_pipe_size(arg);
        if (size == 0)
                return -EINVAL;
        nr_pages = size >> PAGE_SHIFT;

        if (!nr_pages)
                return -EINVAL;

        /*
         * If trying to increase the pipe capacity, check that an
         * unprivileged user is not trying to exceed various limits
         * (soft limit check here, hard limit check just below).
         * Decreasing the pipe capacity is always permitted, even
         * if the user is currently over a limit.
         */
        if (nr_pages > pipe->buffers &&
                        size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
                return -EPERM;

        user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

        if (nr_pages > pipe->buffers &&
                        (too_many_pipe_buffers_hard(user_bufs) ||
                         too_many_pipe_buffers_soft(user_bufs)) &&
                        !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
                ret = -EPERM;
                goto out_revert_acct;
        }

        /*
         * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
         * expect a lot of shrink+grow operations, just free and allocate
         * again like we would do for growing. If the pipe currently
         * contains more buffers than arg, then return busy.
         */
        if (nr_pages < pipe->nrbufs) {
                ret = -EBUSY;
                goto out_revert_acct;
        }

        bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
        if (unlikely(!bufs)) {
                ret = -ENOMEM;
                goto out_revert_acct;
        }

        /*
         * The pipe array wraps around, so just start the new one at zero
         * and adjust the indexes.
         */
        if (pipe->nrbufs) {
                unsigned int tail;
                unsigned int head;

                tail = pipe->curbuf + pipe->nrbufs;
                if (tail < pipe->buffers)
                        tail = 0;
                else
                        tail &= (pipe->buffers - 1);

                head = pipe->nrbufs - tail;
                if (head)
                        memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
                if (tail)
                        memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
        }

        pipe->curbuf = 0;
        kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->buffers = nr_pages;
        return nr_pages * PAGE_SIZE;

out_revert_acct:
        (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
        return ret;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dopipe_max_size
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
                 size_t *lenp, loff_t *ppos)
{
        return proc_dopipe_max_size(table, write, buf, lenp, ppos);
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
        return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe;
        long ret;

        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;

        __pipe_lock(pipe);

        switch (cmd) {
        case F_SETPIPE_SZ:
                ret = pipe_set_size(pipe, arg);
                break;
        case F_GETPIPE_SZ:
                ret = pipe->buffers * PAGE_SIZE;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        __pipe_unlock(pipe);
        return ret;
}
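
/*
 * Illustrative userspace sketch (not part of the kernel sources): pipe
 * capacity can be queried and resized through fcntl(2). The kernel may
 * round the request up (see round_pipe_size()), so check the return value
 * rather than assuming the exact size was granted:
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *
 *      long sz = fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);
 *      // sz is the actual new capacity in bytes, or -1 with errno set
 *      long cur = fcntl(fds[1], F_GETPIPE_SZ, 0);
 */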

static const struct super_operations pipefs_ops = {
        .destroy_inode = free_inode_nonrcu,
        .statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
                        &pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
        .name           = "pipefs",
        .mount          = pipefs_mount,
        .kill_sb        = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
        int err = register_filesystem(&pipe_fs_type);

        if (!err) {
                pipe_mnt = kern_mount(&pipe_fs_type);
                if (IS_ERR(pipe_mnt)) {
                        err = PTR_ERR(pipe_mnt);
                        unregister_filesystem(&pipe_fs_type);
                }
        }
        return err;
}

fs_initcall(init_pipe_fs);