linux/fs/aio.c
   1/*
   2 *      An async IO implementation for Linux
   3 *      Written by Benjamin LaHaise <bcrl@kvack.org>
   4 *
   5 *      Implements an efficient asynchronous io interface.
   6 *
   7 *      Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
   8 *
   9 *      See ../COPYING for licensing terms.
  10 */
  11#include <linux/kernel.h>
  12#include <linux/init.h>
  13#include <linux/errno.h>
  14#include <linux/time.h>
  15#include <linux/aio_abi.h>
  16#include <linux/module.h>
  17#include <linux/syscalls.h>
  18#include <linux/uio.h>
  19
  20#define DEBUG 0
  21
  22#include <linux/sched.h>
  23#include <linux/fs.h>
  24#include <linux/file.h>
  25#include <linux/mm.h>
  26#include <linux/mman.h>
  27#include <linux/mmu_context.h>
  28#include <linux/slab.h>
  29#include <linux/timer.h>
  30#include <linux/aio.h>
  31#include <linux/highmem.h>
  32#include <linux/workqueue.h>
  33#include <linux/security.h>
  34#include <linux/eventfd.h>
  35
  36#include <asm/kmap_types.h>
  37#include <asm/uaccess.h>
  38
  39#if DEBUG > 1
  40#define dprintk         printk
  41#else
  42#define dprintk(x...)   do { ; } while (0)
  43#endif
  44
  45/*------ sysctl variables----*/
  46static DEFINE_SPINLOCK(aio_nr_lock);
  47unsigned long aio_nr;           /* current system wide number of aio requests */
  48unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
  49/*----end sysctl variables---*/
  50
  51static struct kmem_cache        *kiocb_cachep;
  52static struct kmem_cache        *kioctx_cachep;
  53
  54static struct workqueue_struct *aio_wq;
  55
  56/* Used for rare fput completion. */
  57static void aio_fput_routine(struct work_struct *);
  58static DECLARE_WORK(fput_work, aio_fput_routine);
  59
  60static DEFINE_SPINLOCK(fput_lock);
  61static LIST_HEAD(fput_head);
  62
  63static void aio_kick_handler(struct work_struct *);
  64static void aio_queue_work(struct kioctx *);
  65
  66/* aio_setup
  67 *      Creates the slab caches used by the aio routines, panic on
  68 *      failure as this is done early during the boot sequence.
  69 */
  70static int __init aio_setup(void)
  71{
  72        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
  73        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
  74
  75        aio_wq = create_workqueue("aio");
  76
  77        pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
  78
  79        return 0;
  80}
  81__initcall(aio_setup);
  82
  83static void aio_free_ring(struct kioctx *ctx)
  84{
  85        struct aio_ring_info *info = &ctx->ring_info;
  86        long i;
  87
  88        for (i=0; i<info->nr_pages; i++)
  89                put_page(info->ring_pages[i]);
  90
  91        if (info->mmap_size) {
  92                down_write(&ctx->mm->mmap_sem);
  93                do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
  94                up_write(&ctx->mm->mmap_sem);
  95        }
  96
  97        if (info->ring_pages && info->ring_pages != info->internal_pages)
  98                kfree(info->ring_pages);
  99        info->ring_pages = NULL;
 100        info->nr = 0;
 101}
 102
 103static int aio_setup_ring(struct kioctx *ctx)
 104{
 105        struct aio_ring *ring;
 106        struct aio_ring_info *info = &ctx->ring_info;
 107        unsigned nr_events = ctx->max_reqs;
 108        unsigned long size;
 109        int nr_pages;
 110
 111        /* Compensate for the ring buffer's head/tail overlap entry */
 112        nr_events += 2; /* 1 is required, 2 for good luck */
 113
 114        size = sizeof(struct aio_ring);
 115        size += sizeof(struct io_event) * nr_events;
 116        nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
 117
 118        if (nr_pages < 0)
 119                return -EINVAL;
 120
 121        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
 122
 123        info->nr = 0;
 124        info->ring_pages = info->internal_pages;
 125        if (nr_pages > AIO_RING_PAGES) {
 126                info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 127                if (!info->ring_pages)
 128                        return -ENOMEM;
 129        }
 130
 131        info->mmap_size = nr_pages * PAGE_SIZE;
 132        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
 133        down_write(&ctx->mm->mmap_sem);
 134        info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
 135                                  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
 136                                  0);
 137        if (IS_ERR((void *)info->mmap_base)) {
 138                up_write(&ctx->mm->mmap_sem);
 139                info->mmap_size = 0;
 140                aio_free_ring(ctx);
 141                return -EAGAIN;
 142        }
 143
 144        dprintk("mmap address: 0x%08lx\n", info->mmap_base);
 145        info->nr_pages = get_user_pages(current, ctx->mm,
 146                                        info->mmap_base, nr_pages, 
 147                                        1, 0, info->ring_pages, NULL);
 148        up_write(&ctx->mm->mmap_sem);
 149
 150        if (unlikely(info->nr_pages != nr_pages)) {
 151                aio_free_ring(ctx);
 152                return -EAGAIN;
 153        }
 154
 155        ctx->user_id = info->mmap_base;
 156
 157        info->nr = nr_events;           /* trusted copy */
 158
 159        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
 160        ring->nr = nr_events;   /* user copy */
 161        ring->id = ctx->user_id;
 162        ring->head = ring->tail = 0;
 163        ring->magic = AIO_RING_MAGIC;
 164        ring->compat_features = AIO_RING_COMPAT_FEATURES;
 165        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 166        ring->header_length = sizeof(struct aio_ring);
 167        kunmap_atomic(ring, KM_USER0);
 168
 169        return 0;
 170}
 171
 172
 173/* aio_ring_event: returns a pointer to the event at the given index from
 174 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
 175 */
 176#define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / sizeof(struct io_event))
 177#define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 178#define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 179
 180#define aio_ring_event(info, nr, km) ({                                 \
 181        unsigned pos = (nr) + AIO_EVENTS_OFFSET;                        \
 182        struct io_event *__event;                                       \
 183        __event = kmap_atomic(                                          \
 184                        (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
 185        __event += pos % AIO_EVENTS_PER_PAGE;                           \
 186        __event;                                                        \
 187})
 188
 189#define put_aio_ring_event(event, km) do {      \
 190        struct io_event *__event = (event);     \
 191        (void)__event;                          \
 192        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 193} while(0)
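/*
 * Worked example of the index math above (illustrative, assuming the common
 * case of 4 KiB pages, a 32-byte struct io_event and a 32-byte struct
 * aio_ring header):
 *
 *      AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
 *      AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *      AIO_EVENTS_OFFSET     = 128 - 127        = 1
 *
 * Event 0 therefore maps to pos 1: page 0, slot 1 (slot 0 of the first page
 * is occupied by the ring header); event 126 maps to pos 127, the last slot
 * of page 0; and event 127 maps to pos 128: page 1, slot 0.
 */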
 194
 195static void ctx_rcu_free(struct rcu_head *head)
 196{
 197        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
 198        unsigned nr_events = ctx->max_reqs;
 199
 200        kmem_cache_free(kioctx_cachep, ctx);
 201
 202        if (nr_events) {
 203                spin_lock(&aio_nr_lock);
 204                BUG_ON(aio_nr - nr_events > aio_nr);
 205                aio_nr -= nr_events;
 206                spin_unlock(&aio_nr_lock);
 207        }
 208}
 209
 210/* __put_ioctx
 211 *      Called when the last user of an aio context has gone away,
 212 *      and the struct needs to be freed.
 213 */
 214static void __put_ioctx(struct kioctx *ctx)
 215{
 216        BUG_ON(ctx->reqs_active);
 217
 218        cancel_delayed_work(&ctx->wq);
 219        cancel_work_sync(&ctx->wq.work);
 220        aio_free_ring(ctx);
 221        mmdrop(ctx->mm);
 222        ctx->mm = NULL;
 223        pr_debug("__put_ioctx: freeing %p\n", ctx);
 224        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 225}
 226
 227#define get_ioctx(kioctx) do {                                          \
 228        BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
 229        atomic_inc(&(kioctx)->users);                                   \
 230} while (0)
 231#define put_ioctx(kioctx) do {                                          \
 232        BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
 233        if (unlikely(atomic_dec_and_test(&(kioctx)->users)))            \
 234                __put_ioctx(kioctx);                                    \
 235} while (0)
 236
 237/* ioctx_alloc
 238 *      Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 239 */
 240static struct kioctx *ioctx_alloc(unsigned nr_events)
 241{
 242        struct mm_struct *mm;
 243        struct kioctx *ctx;
 244        int did_sync = 0;
 245
 246        /* Prevent overflows */
 247        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
 248            (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
 249                pr_debug("ENOMEM: nr_events too high\n");
 250                return ERR_PTR(-EINVAL);
 251        }
 252
 253        if ((unsigned long)nr_events > aio_max_nr)
 254                return ERR_PTR(-EAGAIN);
 255
 256        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
 257        if (!ctx)
 258                return ERR_PTR(-ENOMEM);
 259
 260        ctx->max_reqs = nr_events;
 261        mm = ctx->mm = current->mm;
 262        atomic_inc(&mm->mm_count);
 263
 264        atomic_set(&ctx->users, 1);
 265        spin_lock_init(&ctx->ctx_lock);
 266        spin_lock_init(&ctx->ring_info.ring_lock);
 267        init_waitqueue_head(&ctx->wait);
 268
 269        INIT_LIST_HEAD(&ctx->active_reqs);
 270        INIT_LIST_HEAD(&ctx->run_list);
 271        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 272
 273        if (aio_setup_ring(ctx) < 0)
 274                goto out_freectx;
 275
 276        /* limit the number of system wide aios */
 277        do {
 278                spin_lock_bh(&aio_nr_lock);
 279                if (aio_nr + nr_events > aio_max_nr ||
 280                    aio_nr + nr_events < aio_nr)
 281                        ctx->max_reqs = 0;
 282                else
 283                        aio_nr += ctx->max_reqs;
 284                spin_unlock_bh(&aio_nr_lock);
 285                if (ctx->max_reqs || did_sync)
 286                        break;
 287
 288                /* wait for rcu callbacks to have completed before giving up */
 289                synchronize_rcu();
 290                did_sync = 1;
 291                ctx->max_reqs = nr_events;
 292        } while (1);
 293
 294        if (ctx->max_reqs == 0)
 295                goto out_cleanup;
 296
 297        /* now link into global list. */
 298        spin_lock(&mm->ioctx_lock);
 299        hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
 300        spin_unlock(&mm->ioctx_lock);
 301
 302        dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
 303                ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
 304        return ctx;
 305
 306out_cleanup:
 307        __put_ioctx(ctx);
 308        return ERR_PTR(-EAGAIN);
 309
 310out_freectx:
 311        mmdrop(mm);
 312        kmem_cache_free(kioctx_cachep, ctx);
 313        ctx = ERR_PTR(-ENOMEM);
 314
 315        dprintk("aio: error allocating ioctx %p\n", ctx);
 316        return ctx;
 317}
 318
 319/* aio_cancel_all
 320 *      Cancels all outstanding aio requests on an aio context.  Used 
 321 *      when the processes owning a context have all exited to encourage 
 322 *      the rapid destruction of the kioctx.
 323 */
 324static void aio_cancel_all(struct kioctx *ctx)
 325{
 326        int (*cancel)(struct kiocb *, struct io_event *);
 327        struct io_event res;
 328        spin_lock_irq(&ctx->ctx_lock);
 329        ctx->dead = 1;
 330        while (!list_empty(&ctx->active_reqs)) {
 331                struct list_head *pos = ctx->active_reqs.next;
 332                struct kiocb *iocb = list_kiocb(pos);
 333                list_del_init(&iocb->ki_list);
 334                cancel = iocb->ki_cancel;
 335                kiocbSetCancelled(iocb);
 336                if (cancel) {
 337                        iocb->ki_users++;
 338                        spin_unlock_irq(&ctx->ctx_lock);
 339                        cancel(iocb, &res);
 340                        spin_lock_irq(&ctx->ctx_lock);
 341                }
 342        }
 343        spin_unlock_irq(&ctx->ctx_lock);
 344}
 345
 346static void wait_for_all_aios(struct kioctx *ctx)
 347{
 348        struct task_struct *tsk = current;
 349        DECLARE_WAITQUEUE(wait, tsk);
 350
 351        spin_lock_irq(&ctx->ctx_lock);
 352        if (!ctx->reqs_active)
 353                goto out;
 354
 355        add_wait_queue(&ctx->wait, &wait);
 356        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 357        while (ctx->reqs_active) {
 358                spin_unlock_irq(&ctx->ctx_lock);
 359                io_schedule();
 360                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 361                spin_lock_irq(&ctx->ctx_lock);
 362        }
 363        __set_task_state(tsk, TASK_RUNNING);
 364        remove_wait_queue(&ctx->wait, &wait);
 365
 366out:
 367        spin_unlock_irq(&ctx->ctx_lock);
 368}
 369
 370/* wait_on_sync_kiocb:
 371 *      Waits on the given sync kiocb to complete.
 372 */
 373ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 374{
 375        while (iocb->ki_users) {
 376                set_current_state(TASK_UNINTERRUPTIBLE);
 377                if (!iocb->ki_users)
 378                        break;
 379                io_schedule();
 380        }
 381        __set_current_state(TASK_RUNNING);
 382        return iocb->ki_user_data;
 383}
 384EXPORT_SYMBOL(wait_on_sync_kiocb);
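/*
 * A minimal sketch of the sync-kiocb pattern that pairs with the helper
 * above (roughly what do_sync_read() in fs/read_write.c does; -EIOCBRETRY
 * handling and error paths are omitted here):
 *
 *      struct iovec iov = { .iov_base = buf, .iov_len = len };
 *      struct kiocb kiocb;
 *      ssize_t ret;
 *
 *      init_sync_kiocb(&kiocb, filp);
 *      kiocb.ki_pos = *ppos;
 *      ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *      if (ret == -EIOCBQUEUED)
 *              ret = wait_on_sync_kiocb(&kiocb);
 */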
 385
 386/* exit_aio: called when the last user of mm goes away.  At this point, 
 387 * there is no way for any new requests to be submitted or any of the 
 388 * io_* syscalls to be called on the context.  However, there may be 
 389 * outstanding requests which hold references to the context; as they 
 390 * go away, they will call put_ioctx and release any pinned memory
 391 * associated with the request (held via struct page * references).
 392 */
 393void exit_aio(struct mm_struct *mm)
 394{
 395        struct kioctx *ctx;
 396
 397        while (!hlist_empty(&mm->ioctx_list)) {
 398                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
 399                hlist_del_rcu(&ctx->list);
 400
 401                aio_cancel_all(ctx);
 402
 403                wait_for_all_aios(ctx);
 404                /*
 405                 * Ensure we don't leave the ctx on the aio_wq
 406                 */
 407                cancel_work_sync(&ctx->wq.work);
 408
 409                if (1 != atomic_read(&ctx->users))
 410                        printk(KERN_DEBUG
 411                                "exit_aio:ioctx still alive: %d %d %d\n",
 412                                atomic_read(&ctx->users), ctx->dead,
 413                                ctx->reqs_active);
 414                put_ioctx(ctx);
 415        }
 416}
 417
 418/* aio_get_req
 419 *      Allocate a slot for an aio request.  Increments the users count
 420 * of the kioctx so that the kioctx stays around until all requests are
 421 * complete.  Returns NULL if no requests are free.
 422 *
 423 * Returns with kiocb->users set to 2.  The io submit code path holds
 424 * an extra reference while submitting the i/o.
 425 * This prevents races between the aio code path referencing the
 426 * req (after submitting it) and aio_complete() freeing the req.
 427 */
 428static struct kiocb *__aio_get_req(struct kioctx *ctx)
 429{
 430        struct kiocb *req = NULL;
 431        struct aio_ring *ring;
 432        int okay = 0;
 433
 434        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
 435        if (unlikely(!req))
 436                return NULL;
 437
 438        req->ki_flags = 0;
 439        req->ki_users = 2;
 440        req->ki_key = 0;
 441        req->ki_ctx = ctx;
 442        req->ki_cancel = NULL;
 443        req->ki_retry = NULL;
 444        req->ki_dtor = NULL;
 445        req->private = NULL;
 446        req->ki_iovec = NULL;
 447        INIT_LIST_HEAD(&req->ki_run_list);
 448        req->ki_eventfd = NULL;
 449
 450        /* Check if the completion queue has enough free space to
 451         * accept an event from this io.
 452         */
 453        spin_lock_irq(&ctx->ctx_lock);
 454        ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
 455        if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
 456                list_add(&req->ki_list, &ctx->active_reqs);
 457                ctx->reqs_active++;
 458                okay = 1;
 459        }
 460        kunmap_atomic(ring, KM_USER0);
 461        spin_unlock_irq(&ctx->ctx_lock);
 462
 463        if (!okay) {
 464                kmem_cache_free(kiocb_cachep, req);
 465                req = NULL;
 466        }
 467
 468        return req;
 469}
 470
 471static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 472{
 473        struct kiocb *req;
 474        /* Handle a potential starvation case -- should be exceedingly rare as 
 475         * requests will be stuck on fput_head only if the aio_fput_routine is 
 476         * delayed and the requests were the last user of the struct file.
 477         */
 478        req = __aio_get_req(ctx);
 479        if (unlikely(NULL == req)) {
 480                aio_fput_routine(NULL);
 481                req = __aio_get_req(ctx);
 482        }
 483        return req;
 484}
 485
 486static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 487{
 488        assert_spin_locked(&ctx->ctx_lock);
 489
 490        if (req->ki_eventfd != NULL)
 491                eventfd_ctx_put(req->ki_eventfd);
 492        if (req->ki_dtor)
 493                req->ki_dtor(req);
 494        if (req->ki_iovec != &req->ki_inline_vec)
 495                kfree(req->ki_iovec);
 496        kmem_cache_free(kiocb_cachep, req);
 497        ctx->reqs_active--;
 498
 499        if (unlikely(!ctx->reqs_active && ctx->dead))
 500                wake_up(&ctx->wait);
 501}
 502
 503static void aio_fput_routine(struct work_struct *data)
 504{
 505        spin_lock_irq(&fput_lock);
 506        while (likely(!list_empty(&fput_head))) {
 507                struct kiocb *req = list_kiocb(fput_head.next);
 508                struct kioctx *ctx = req->ki_ctx;
 509
 510                list_del(&req->ki_list);
 511                spin_unlock_irq(&fput_lock);
 512
 513                /* Complete the fput(s) */
 514                if (req->ki_filp != NULL)
 515                        __fput(req->ki_filp);
 516
 517                /* Link the iocb into the context's free list */
 518                spin_lock_irq(&ctx->ctx_lock);
 519                really_put_req(ctx, req);
 520                spin_unlock_irq(&ctx->ctx_lock);
 521
 522                put_ioctx(ctx);
 523                spin_lock_irq(&fput_lock);
 524        }
 525        spin_unlock_irq(&fput_lock);
 526}
 527
 528/* __aio_put_req
 529 *      Returns true if this put was the last user of the request.
 530 */
 531static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 532{
 533        dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 534                req, atomic_long_read(&req->ki_filp->f_count));
 535
 536        assert_spin_locked(&ctx->ctx_lock);
 537
 538        req->ki_users--;
 539        BUG_ON(req->ki_users < 0);
 540        if (likely(req->ki_users))
 541                return 0;
 542        list_del(&req->ki_list);                /* remove from active_reqs */
 543        req->ki_cancel = NULL;
 544        req->ki_retry = NULL;
 545
 546        /*
 547         * Try to optimize the aio and eventfd file* puts, by avoiding
 548         * scheduling work when it is not __fput() time. In normal cases,
 549         * we would not be holding the last reference to the file*, so
 550         * this function will be executed w/out any aio kthread wakeup.
 551         */
 552        if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
 553                get_ioctx(ctx);
 554                spin_lock(&fput_lock);
 555                list_add(&req->ki_list, &fput_head);
 556                spin_unlock(&fput_lock);
 557                queue_work(aio_wq, &fput_work);
 558        } else {
 559                req->ki_filp = NULL;
 560                really_put_req(ctx, req);
 561        }
 562        return 1;
 563}
 564
 565/* aio_put_req
 566 *      Returns true if this put was the last user of the kiocb,
 567 *      false if the request is still in use.
 568 */
 569int aio_put_req(struct kiocb *req)
 570{
 571        struct kioctx *ctx = req->ki_ctx;
 572        int ret;
 573        spin_lock_irq(&ctx->ctx_lock);
 574        ret = __aio_put_req(ctx, req);
 575        spin_unlock_irq(&ctx->ctx_lock);
 576        return ret;
 577}
 578EXPORT_SYMBOL(aio_put_req);
 579
 580static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 581{
 582        struct mm_struct *mm = current->mm;
 583        struct kioctx *ctx, *ret = NULL;
 584        struct hlist_node *n;
 585
 586        rcu_read_lock();
 587
 588        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
 589                if (ctx->user_id == ctx_id && !ctx->dead) {
 590                        get_ioctx(ctx);
 591                        ret = ctx;
 592                        break;
 593                }
 594        }
 595
 596        rcu_read_unlock();
 597        return ret;
 598}
 599
 600/*
 601 * Queue up a kiocb to be retried. Assumes that the kiocb
 602 * has already been marked as kicked, and places it on
 603 * the retry run list for the corresponding ioctx, if it
 604 * isn't already queued. Returns 1 if it actually queued
 605 * the kiocb (to tell the caller to activate the work
 606 * queue to process it), or 0, if it found that it was
 607 * already queued.
 608 */
 609static inline int __queue_kicked_iocb(struct kiocb *iocb)
 610{
 611        struct kioctx *ctx = iocb->ki_ctx;
 612
 613        assert_spin_locked(&ctx->ctx_lock);
 614
 615        if (list_empty(&iocb->ki_run_list)) {
 616                list_add_tail(&iocb->ki_run_list,
 617                        &ctx->run_list);
 618                return 1;
 619        }
 620        return 0;
 621}
 622
 623/* aio_run_iocb
 624 *      This is the core aio execution routine. It is
 625 *      invoked both for initial i/o submission and
 626 *      subsequent retries via the aio_kick_handler.
 627 *      Expects to be invoked with iocb->ki_ctx->lock
 628 *      already held. The lock is released and reacquired
 629 *      as needed during processing.
 630 *
 631 * Calls the iocb retry method (already setup for the
 632 * iocb on initial submission) for operation specific
 633 * handling, but takes care of most of common retry
 634 * execution details for a given iocb. The retry method
 635 * needs to be non-blocking as far as possible, to avoid
 636 * holding up other iocbs waiting to be serviced by the
 637 * retry kernel thread.
 638 *
 639 * The trickier parts in this code have to do with
 640 * ensuring that only one retry instance is in progress
 641 * for a given iocb at any time. Providing that guarantee
 642 * simplifies the coding of individual aio operations as
 643 * it avoids various potential races.
 644 */
 645static ssize_t aio_run_iocb(struct kiocb *iocb)
 646{
 647        struct kioctx   *ctx = iocb->ki_ctx;
 648        ssize_t (*retry)(struct kiocb *);
 649        ssize_t ret;
 650
 651        if (!(retry = iocb->ki_retry)) {
 652                printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 653                return 0;
 654        }
 655
 656        /*
 657         * We don't want the next retry iteration for this
 658         * operation to start until this one has returned and
 659         * updated the iocb state. However, wait_queue functions
 660         * can trigger a kick_iocb from interrupt context in the
 661         * meantime, indicating that data is available for the next
 662         * iteration. We want to remember that and enable the
 663         * next retry iteration _after_ we are through with
 664         * this one.
 665         *
 666         * So, in order to be able to register a "kick", but
 667         * prevent it from being queued now, we clear the kick
 668         * flag, but make the kick code *think* that the iocb is
 669         * still on the run list until we are actually done.
 670         * When we are done with this iteration, we check if
 671         * the iocb was kicked in the meantime and if so, queue
 672         * it up afresh.
 673         */
 674
 675        kiocbClearKicked(iocb);
 676
 677        /*
 678         * This is so that aio_complete knows it doesn't need to
 679         * pull the iocb off the run list (We can't just call
 680         * INIT_LIST_HEAD because we don't want a kick_iocb to
 681         * queue this on the run list yet)
 682         */
 683        iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
 684        spin_unlock_irq(&ctx->ctx_lock);
 685
 686        /* Quit retrying if the i/o has been cancelled */
 687        if (kiocbIsCancelled(iocb)) {
 688                ret = -EINTR;
 689                aio_complete(iocb, ret, 0);
 690                /* must not access the iocb after this */
 691                goto out;
 692        }
 693
 694        /*
 695         * Now we are all set to call the retry method in async
 696         * context.
 697         */
 698        ret = retry(iocb);
 699
 700        if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
 701                BUG_ON(!list_empty(&iocb->ki_wait.task_list));
 702                aio_complete(iocb, ret, 0);
 703        }
 704out:
 705        spin_lock_irq(&ctx->ctx_lock);
 706
 707        if (-EIOCBRETRY == ret) {
 708                /*
 709                 * OK, now that we are done with this iteration
 710                 * and know that there is more left to go,
 711                 * this is where we let go so that a subsequent
 712                 * "kick" can start the next iteration
 713                 */
 714
 715                /* will make __queue_kicked_iocb succeed from here on */
 716                INIT_LIST_HEAD(&iocb->ki_run_list);
 717                /* we must queue the next iteration ourselves, if it
 718                 * has already been kicked */
 719                if (kiocbIsKicked(iocb)) {
 720                        __queue_kicked_iocb(iocb);
 721
 722                        /*
 723                         * __queue_kicked_iocb will always return 1 here, because
 724                         * iocb->ki_run_list is empty at this point so it should
 725                         * be safe to unconditionally queue the context into the
 726                         * work queue.
 727                         */
 728                        aio_queue_work(ctx);
 729                }
 730        }
 731        return ret;
 732}
 733
 734/*
 735 * __aio_run_iocbs:
 736 *      Process all pending retries queued on the ioctx
 737 *      run list.
 738 * Assumes it is operating within the aio issuer's mm
 739 * context.
 740 */
 741static int __aio_run_iocbs(struct kioctx *ctx)
 742{
 743        struct kiocb *iocb;
 744        struct list_head run_list;
 745
 746        assert_spin_locked(&ctx->ctx_lock);
 747
 748        list_replace_init(&ctx->run_list, &run_list);
 749        while (!list_empty(&run_list)) {
 750                iocb = list_entry(run_list.next, struct kiocb,
 751                        ki_run_list);
 752                list_del(&iocb->ki_run_list);
 753                /*
 754                 * Hold an extra reference while retrying i/o.
 755                 */
 756                iocb->ki_users++;       /* grab extra reference */
 757                aio_run_iocb(iocb);
 758                __aio_put_req(ctx, iocb);
 759        }
 760        if (!list_empty(&ctx->run_list))
 761                return 1;
 762        return 0;
 763}
 764
 765static void aio_queue_work(struct kioctx * ctx)
 766{
 767        unsigned long timeout;
 768        /*
 769         * if someone is waiting, get the work started right
 770         * away, otherwise, use a longer delay
 771         */
 772        smp_mb();
 773        if (waitqueue_active(&ctx->wait))
 774                timeout = 1;
 775        else
 776                timeout = HZ/10;
 777        queue_delayed_work(aio_wq, &ctx->wq, timeout);
 778}
 779
 780
 781/*
 782 * aio_run_iocbs:
 783 *      Process all pending retries queued on the ioctx
 784 *      run list.
 785 * Assumes it is operating within the aio issuer's mm
 786 * context.
 787 */
 788static inline void aio_run_iocbs(struct kioctx *ctx)
 789{
 790        int requeue;
 791
 792        spin_lock_irq(&ctx->ctx_lock);
 793
 794        requeue = __aio_run_iocbs(ctx);
 795        spin_unlock_irq(&ctx->ctx_lock);
 796        if (requeue)
 797                aio_queue_work(ctx);
 798}
 799
 800/*
 801 * just like aio_run_iocbs, but keeps running them until
 802 * the list stays empty
 803 */
 804static inline void aio_run_all_iocbs(struct kioctx *ctx)
 805{
 806        spin_lock_irq(&ctx->ctx_lock);
 807        while (__aio_run_iocbs(ctx))
 808                ;
 809        spin_unlock_irq(&ctx->ctx_lock);
 810}
 811
 812/*
 813 * aio_kick_handler:
 814 *      Work queue handler triggered to process pending
 815 *      retries on an ioctx. Takes on the aio issuer's
 816 *      mm context before running the iocbs, so that
 817 *      copy_xxx_user operates on the issuer's address
 818 *      space.
 819 * Run on aiod's context.
 820 */
 821static void aio_kick_handler(struct work_struct *work)
 822{
 823        struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 824        mm_segment_t oldfs = get_fs();
 825        struct mm_struct *mm;
 826        int requeue;
 827
 828        set_fs(USER_DS);
 829        use_mm(ctx->mm);
 830        spin_lock_irq(&ctx->ctx_lock);
 831        requeue =__aio_run_iocbs(ctx);
 832        mm = ctx->mm;
 833        spin_unlock_irq(&ctx->ctx_lock);
 834        unuse_mm(mm);
 835        set_fs(oldfs);
 836        /*
 837         * we're in a worker thread already, don't use queue_delayed_work.
 838         */
 839        if (requeue)
 840                queue_delayed_work(aio_wq, &ctx->wq, 0);
 841}
 842
 843
 844/*
 845 * Called by kick_iocb to queue the kiocb for retry
 846 * and if required activate the aio work queue to process
 847 * it
 848 */
 849static void try_queue_kicked_iocb(struct kiocb *iocb)
 850{
 851        struct kioctx   *ctx = iocb->ki_ctx;
 852        unsigned long flags;
 853        int run = 0;
 854
 855        /* We're supposed to be the only path putting the iocb back on the run
 856         * list.  If we find that the iocb is *back* on a wait queue already
 857         * then retry has happened before we could queue the iocb.  This also
 858         * means that the retry could have completed and freed our iocb, no
 859         * good. */
 860        BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
 861
 862        spin_lock_irqsave(&ctx->ctx_lock, flags);
 863        /* set this inside the lock so that we can't race with aio_run_iocb()
 864         * testing it and putting the iocb on the run list under the lock */
 865        if (!kiocbTryKick(iocb))
 866                run = __queue_kicked_iocb(iocb);
 867        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 868        if (run)
 869                aio_queue_work(ctx);
 870}
 871
 872/*
 873 * kick_iocb:
 874 *      Called typically from a wait queue callback context
 875 *      (aio_wake_function) to trigger a retry of the iocb.
 876 *      The retry is usually executed by aio workqueue
 877 *      threads (See aio_kick_handler).
 878 */
 879void kick_iocb(struct kiocb *iocb)
 880{
 881        /* sync iocbs are easy: they can only ever be executing from a 
 882         * single context. */
 883        if (is_sync_kiocb(iocb)) {
 884                kiocbSetKicked(iocb);
 885                wake_up_process(iocb->ki_obj.tsk);
 886                return;
 887        }
 888
 889        try_queue_kicked_iocb(iocb);
 890}
 891EXPORT_SYMBOL(kick_iocb);
 892
 893/* aio_complete
 894 *      Called when the io request on the given iocb is complete.
 895 *      Returns true if this is the last user of the request.  The 
 896 *      only other user of the request can be the cancellation code.
 897 */
 898int aio_complete(struct kiocb *iocb, long res, long res2)
 899{
 900        struct kioctx   *ctx = iocb->ki_ctx;
 901        struct aio_ring_info    *info;
 902        struct aio_ring *ring;
 903        struct io_event *event;
 904        unsigned long   flags;
 905        unsigned long   tail;
 906        int             ret;
 907
 908        /*
 909         * Special case handling for sync iocbs:
 910         *  - events go directly into the iocb for fast handling
 911         *  - the sync task with the iocb in its stack holds the single iocb
 912         *    ref, no other paths have a way to get another ref
 913         *  - the sync task helpfully left a reference to itself in the iocb
 914         */
 915        if (is_sync_kiocb(iocb)) {
 916                BUG_ON(iocb->ki_users != 1);
 917                iocb->ki_user_data = res;
 918                iocb->ki_users = 0;
 919                wake_up_process(iocb->ki_obj.tsk);
 920                return 1;
 921        }
 922
 923        info = &ctx->ring_info;
 924
 925        /* add a completion event to the ring buffer.
 926         * must be done holding ctx->ctx_lock to prevent
 927         * other code from messing with the tail
 928         * pointer since we might be called from irq
 929         * context.
 930         */
 931        spin_lock_irqsave(&ctx->ctx_lock, flags);
 932
 933        if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
 934                list_del_init(&iocb->ki_run_list);
 935
 936        /*
 937         * cancelled requests don't get events, userland was given one
 938         * when the event got cancelled.
 939         */
 940        if (kiocbIsCancelled(iocb))
 941                goto put_rq;
 942
 943        ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
 944
 945        tail = info->tail;
 946        event = aio_ring_event(info, tail, KM_IRQ0);
 947        if (++tail >= info->nr)
 948                tail = 0;
 949
 950        event->obj = (u64)(unsigned long)iocb->ki_obj.user;
 951        event->data = iocb->ki_user_data;
 952        event->res = res;
 953        event->res2 = res2;
 954
 955        dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
 956                ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
 957                res, res2);
 958
 959        /* after flagging the request as done, we
 960         * must never even look at it again
 961         */
 962        smp_wmb();      /* make event visible before updating tail */
 963
 964        info->tail = tail;
 965        ring->tail = tail;
 966
 967        put_aio_ring_event(event, KM_IRQ0);
 968        kunmap_atomic(ring, KM_IRQ1);
 969
 970        pr_debug("added to ring %p at [%lu]\n", iocb, tail);
 971
 972        /*
 973         * Check if the user asked us to deliver the result through an
 974         * eventfd. The eventfd_signal() function is safe to be called
 975         * from IRQ context.
 976         */
 977        if (iocb->ki_eventfd != NULL)
 978                eventfd_signal(iocb->ki_eventfd, 1);
 979
 980put_rq:
 981        /* everything turned out well, dispose of the aiocb. */
 982        ret = __aio_put_req(ctx, iocb);
 983
 984        /*
 985         * We have to order our ring_info tail store above and test
 986         * of the wait list below outside the wait lock.  This is
 987         * like in wake_up_bit() where clearing a bit has to be
 988         * ordered with the unlocked test.
 989         */
 990        smp_mb();
 991
 992        if (waitqueue_active(&ctx->wait))
 993                wake_up(&ctx->wait);
 994
 995        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 996        return ret;
 997}
 998EXPORT_SYMBOL(aio_complete);
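/*
 * Typical completion-side usage, as a sketch: a ki_retry method (or the
 * submission path) returns -EIOCBQUEUED, and the driver or filesystem later
 * calls aio_complete() from its completion handler, possibly in irq context.
 * fs/direct-io.c, for instance, ends async requests along these lines (the
 * function name below is illustrative, not an existing kernel symbol):
 *
 *      static void my_async_done(struct kiocb *iocb, ssize_t transferred,
 *                                int error)
 *      {
 *              aio_complete(iocb, error ? error : transferred, 0);
 *      }
 */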
 999
1000/* aio_read_evt
1001 *      Pull an event off of the ioctx's event ring.  Returns the number of 
1002 *      events fetched (0 or 1 ;-)
1003 *      FIXME: make this use cmpxchg.
1004 *      TODO: make the ringbuffer user mmap()able (requires FIXME).
1005 */
1006static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
1007{
1008        struct aio_ring_info *info = &ioctx->ring_info;
1009        struct aio_ring *ring;
1010        unsigned long head;
1011        int ret = 0;
1012
1013        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
1014        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
1015                 (unsigned long)ring->head, (unsigned long)ring->tail,
1016                 (unsigned long)ring->nr);
1017
1018        if (ring->head == ring->tail)
1019                goto out;
1020
1021        spin_lock(&info->ring_lock);
1022
1023        head = ring->head % info->nr;
1024        if (head != ring->tail) {
1025                struct io_event *evp = aio_ring_event(info, head, KM_USER1);
1026                *ent = *evp;
1027                head = (head + 1) % info->nr;
1028                smp_mb(); /* finish reading the event before updating the head */
1029                ring->head = head;
1030                ret = 1;
1031                put_aio_ring_event(evp, KM_USER1);
1032        }
1033        spin_unlock(&info->ring_lock);
1034
1035out:
1036        kunmap_atomic(ring, KM_USER0);
1037        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
1038                 (unsigned long)ring->head, (unsigned long)ring->tail);
1039        return ret;
1040}
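/*
 * Ring protocol in brief (an illustrative note): aio_complete() is the only
 * producer and advances ring->tail modulo info->nr under ctx_lock, while
 * aio_read_evt() above is the consumer and advances ring->head modulo
 * info->nr under ring_lock.  head == tail means the ring is empty; with
 * info->nr == 128, head == 127 and tail == 3, four events are pending, in
 * slots 127, 0, 1 and 2.
 */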
1041
1042struct aio_timeout {
1043        struct timer_list       timer;
1044        int                     timed_out;
1045        struct task_struct      *p;
1046};
1047
1048static void timeout_func(unsigned long data)
1049{
1050        struct aio_timeout *to = (struct aio_timeout *)data;
1051
1052        to->timed_out = 1;
1053        wake_up_process(to->p);
1054}
1055
1056static inline void init_timeout(struct aio_timeout *to)
1057{
1058        setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
1059        to->timed_out = 0;
1060        to->p = current;
1061}
1062
1063static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
1064                               const struct timespec *ts)
1065{
1066        to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
1067        if (time_after(to->timer.expires, jiffies))
1068                add_timer(&to->timer);
1069        else
1070                to->timed_out = 1;
1071}
1072
1073static inline void clear_timeout(struct aio_timeout *to)
1074{
1075        del_singleshot_timer_sync(&to->timer);
1076}
1077
1078static int read_events(struct kioctx *ctx,
1079                        long min_nr, long nr,
1080                        struct io_event __user *event,
1081                        struct timespec __user *timeout)
1082{
1083        long                    start_jiffies = jiffies;
1084        struct task_struct      *tsk = current;
1085        DECLARE_WAITQUEUE(wait, tsk);
1086        int                     ret;
1087        int                     i = 0;
1088        struct io_event         ent;
1089        struct aio_timeout      to;
1090        int                     retry = 0;
1091
1092        /* needed to zero any padding within an entry (there shouldn't be 
1093         * any, but C is fun!)
1094         */
1095        memset(&ent, 0, sizeof(ent));
1096retry:
1097        ret = 0;
1098        while (likely(i < nr)) {
1099                ret = aio_read_evt(ctx, &ent);
1100                if (unlikely(ret <= 0))
1101                        break;
1102
1103                dprintk("read event: %Lx %Lx %Lx %Lx\n",
1104                        ent.data, ent.obj, ent.res, ent.res2);
1105
1106                /* Could we split the check in two? */
1107                ret = -EFAULT;
1108                if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
1109                        dprintk("aio: lost an event due to EFAULT.\n");
1110                        break;
1111                }
1112                ret = 0;
1113
1114                /* Good, event copied to userland, update counts. */
1115                event ++;
1116                i ++;
1117        }
1118
1119        if (min_nr <= i)
1120                return i;
1121        if (ret)
1122                return ret;
1123
1124        /* End fast path */
1125
1126        /* racy check, but it gets redone */
1127        if (!retry && unlikely(!list_empty(&ctx->run_list))) {
1128                retry = 1;
1129                aio_run_all_iocbs(ctx);
1130                goto retry;
1131        }
1132
1133        init_timeout(&to);
1134        if (timeout) {
1135                struct timespec ts;
1136                ret = -EFAULT;
1137                if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
1138                        goto out;
1139
1140                set_timeout(start_jiffies, &to, &ts);
1141        }
1142
1143        while (likely(i < nr)) {
1144                add_wait_queue_exclusive(&ctx->wait, &wait);
1145                do {
1146                        set_task_state(tsk, TASK_INTERRUPTIBLE);
1147                        ret = aio_read_evt(ctx, &ent);
1148                        if (ret)
1149                                break;
1150                        if (min_nr <= i)
1151                                break;
1152                        if (unlikely(ctx->dead)) {
1153                                ret = -EINVAL;
1154                                break;
1155                        }
1156                        if (to.timed_out)       /* Only check after read evt */
1157                                break;
1158                        /* Try to only show up in io wait if there are ops
1159                         *  in flight */
1160                        if (ctx->reqs_active)
1161                                io_schedule();
1162                        else
1163                                schedule();
1164                        if (signal_pending(tsk)) {
1165                                ret = -EINTR;
1166                                break;
1167                        }
1168                        /*ret = aio_read_evt(ctx, &ent);*/
1169                } while (1) ;
1170
1171                set_task_state(tsk, TASK_RUNNING);
1172                remove_wait_queue(&ctx->wait, &wait);
1173
1174                if (unlikely(ret <= 0))
1175                        break;
1176
1177                ret = -EFAULT;
1178                if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
1179                        dprintk("aio: lost an event due to EFAULT.\n");
1180                        break;
1181                }
1182
1183                /* Good, event copied to userland, update counts. */
1184                event ++;
1185                i ++;
1186        }
1187
1188        if (timeout)
1189                clear_timeout(&to);
1190out:
1191        destroy_timer_on_stack(&to.timer);
1192        return i ? i : ret;
1193}
1194
1195/* Take an ioctx and remove it from the list of ioctx's.  Protects 
1196 * against races with itself via ->dead.
1197 */
1198static void io_destroy(struct kioctx *ioctx)
1199{
1200        struct mm_struct *mm = current->mm;
1201        int was_dead;
1202
1203        /* delete the entry from the list if someone else hasn't already */
1204        spin_lock(&mm->ioctx_lock);
1205        was_dead = ioctx->dead;
1206        ioctx->dead = 1;
1207        hlist_del_rcu(&ioctx->list);
1208        spin_unlock(&mm->ioctx_lock);
1209
1210        dprintk("aio_release(%p)\n", ioctx);
1211        if (likely(!was_dead))
1212                put_ioctx(ioctx);       /* twice for the list */
1213
1214        aio_cancel_all(ioctx);
1215        wait_for_all_aios(ioctx);
1216
1217        /*
1218         * Wake up any waiters.  The setting of ctx->dead must be seen
1219         * by other CPUs at this point.  Right now, we rely on the
1220         * locking done by the above calls to ensure this consistency.
1221         */
1222        wake_up(&ioctx->wait);
1223        put_ioctx(ioctx);       /* once for the lookup */
1224}
1225
1226/* sys_io_setup:
1227 *      Create an aio_context capable of receiving at least nr_events.
1228 *      ctxp must not point to an aio_context that already exists, and
1229 *      must be initialized to 0 prior to the call.  On successful
1230 *      creation of the aio_context, *ctxp is filled in with the resulting 
1231 *      handle.  May fail with -EINVAL if *ctxp is not initialized,
1232 *      or if the specified nr_events exceeds internal limits.  May fail 
1233 *      with -EAGAIN if the specified nr_events exceeds the user's limit 
1234 *      of available events.  May fail with -ENOMEM if insufficient kernel
1235 *      resources are available.  May fail with -EFAULT if an invalid
1236 *      pointer is passed for ctxp.  Will fail with -ENOSYS if not
1237 *      implemented.
1238 */
1239SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1240{
1241        struct kioctx *ioctx = NULL;
1242        unsigned long ctx;
1243        long ret;
1244
1245        ret = get_user(ctx, ctxp);
1246        if (unlikely(ret))
1247                goto out;
1248
1249        ret = -EINVAL;
1250        if (unlikely(ctx || nr_events == 0)) {
1251                pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
1252                         ctx, nr_events);
1253                goto out;
1254        }
1255
1256        ioctx = ioctx_alloc(nr_events);
1257        ret = PTR_ERR(ioctx);
1258        if (!IS_ERR(ioctx)) {
1259                ret = put_user(ioctx->user_id, ctxp);
1260                if (!ret)
1261                        return 0;
1262
1263                get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
1264                io_destroy(ioctx);
1265        }
1266
1267out:
1268        return ret;
1269}
1270
1271/* sys_io_destroy:
1272 *      Destroy the aio_context specified.  May cancel any outstanding 
1273 *      AIOs and block on completion.  Will fail with -ENOSYS if not
1274 *      implemented.  May fail with -EFAULT if the context pointed to
1275 *      is invalid.
1276 */
1277SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1278{
1279        struct kioctx *ioctx = lookup_ioctx(ctx);
1280        if (likely(NULL != ioctx)) {
1281                io_destroy(ioctx);
1282                return 0;
1283        }
1284        pr_debug("EINVAL: io_destroy: invalid context id\n");
1285        return -EINVAL;
1286}
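/*
 * Userspace sketch of the io_setup()/io_destroy() round trip described
 * above (not part of this file; raw syscall usage, assuming <unistd.h>,
 * <sys/syscall.h> and <linux/aio_abi.h>).  As noted in the io_setup
 * comment, ctx must be zeroed before the call:
 *
 *      aio_context_t ctx = 0;
 *
 *      if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *              perror("io_setup");
 *      ...
 *      if (syscall(__NR_io_destroy, ctx) < 0)
 *              perror("io_destroy");
 */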
1287
1288static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
1289{
1290        struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
1291
1292        BUG_ON(ret <= 0);
1293
1294        while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
1295                ssize_t this = min((ssize_t)iov->iov_len, ret);
1296                iov->iov_base += this;
1297                iov->iov_len -= this;
1298                iocb->ki_left -= this;
1299                ret -= this;
1300                if (iov->iov_len == 0) {
1301                        iocb->ki_cur_seg++;
1302                        iov++;
1303                }
1304        }
1305
1306        /* the caller should not have done more io than what fit in
1307         * the remaining iovecs */
1308        BUG_ON(ret > 0 && iocb->ki_left == 0);
1309}
1310
1311static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
1312{
1313        struct file *file = iocb->ki_filp;
1314        struct address_space *mapping = file->f_mapping;
1315        struct inode *inode = mapping->host;
1316        ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
1317                         unsigned long, loff_t);
1318        ssize_t ret = 0;
1319        unsigned short opcode;
1320
1321        if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
1322                (iocb->ki_opcode == IOCB_CMD_PREAD)) {
1323                rw_op = file->f_op->aio_read;
1324                opcode = IOCB_CMD_PREADV;
1325        } else {
1326                rw_op = file->f_op->aio_write;
1327                opcode = IOCB_CMD_PWRITEV;
1328        }
1329
1330        /* This matches the pread()/pwrite() logic */
1331        if (iocb->ki_pos < 0)
1332                return -EINVAL;
1333
1334        do {
1335                ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
1336                            iocb->ki_nr_segs - iocb->ki_cur_seg,
1337                            iocb->ki_pos);
1338                if (ret > 0)
1339                        aio_advance_iovec(iocb, ret);
1340
1341        /* retry all partial writes.  retry partial reads as long as it's a
1342         * regular file. */
1343        } while (ret > 0 && iocb->ki_left > 0 &&
1344                 (opcode == IOCB_CMD_PWRITEV ||
1345                  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
1346
1347        /* This means we must have transferred all that we could */
1348        /* No need to retry anymore */
1349        if ((ret == 0) || (iocb->ki_left == 0))
1350                ret = iocb->ki_nbytes - iocb->ki_left;
1351
1352        /* If we managed to write some out we return that, rather than
1353         * the eventual error. */
1354        if (opcode == IOCB_CMD_PWRITEV
1355            && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
1356            && iocb->ki_nbytes - iocb->ki_left)
1357                ret = iocb->ki_nbytes - iocb->ki_left;
1358
1359        return ret;
1360}
1361
1362static ssize_t aio_fdsync(struct kiocb *iocb)
1363{
1364        struct file *file = iocb->ki_filp;
1365        ssize_t ret = -EINVAL;
1366
1367        if (file->f_op->aio_fsync)
1368                ret = file->f_op->aio_fsync(iocb, 1);
1369        return ret;
1370}
1371
1372static ssize_t aio_fsync(struct kiocb *iocb)
1373{
1374        struct file *file = iocb->ki_filp;
1375        ssize_t ret = -EINVAL;
1376
1377        if (file->f_op->aio_fsync)
1378                ret = file->f_op->aio_fsync(iocb, 0);
1379        return ret;
1380}
1381
1382static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
1383{
1384        ssize_t ret;
1385
1386        ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
1387                                    kiocb->ki_nbytes, 1,
1388                                    &kiocb->ki_inline_vec, &kiocb->ki_iovec);
1389        if (ret < 0)
1390                goto out;
1391
1392        kiocb->ki_nr_segs = kiocb->ki_nbytes;
1393        kiocb->ki_cur_seg = 0;
1394        /* ki_nbytes/left now reflect bytes instead of segs */
1395        kiocb->ki_nbytes = ret;
1396        kiocb->ki_left = ret;
1397
1398        ret = 0;
1399out:
1400        return ret;
1401}
1402
1403static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
1404{
1405        kiocb->ki_iovec = &kiocb->ki_inline_vec;
1406        kiocb->ki_iovec->iov_base = kiocb->ki_buf;
1407        kiocb->ki_iovec->iov_len = kiocb->ki_left;
1408        kiocb->ki_nr_segs = 1;
1409        kiocb->ki_cur_seg = 0;
1410        return 0;
1411}
1412
1413/*
1414 * aio_setup_iocb:
1415 *      Performs the initial checks and aio retry method
1416 *      setup for the kiocb at the time of io submission.
1417 */
1418static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1419{
1420        struct file *file = kiocb->ki_filp;
1421        ssize_t ret = 0;
1422
1423        switch (kiocb->ki_opcode) {
1424        case IOCB_CMD_PREAD:
1425                ret = -EBADF;
1426                if (unlikely(!(file->f_mode & FMODE_READ)))
1427                        break;
1428                ret = -EFAULT;
1429                if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
1430                        kiocb->ki_left)))
1431                        break;
1432                ret = security_file_permission(file, MAY_READ);
1433                if (unlikely(ret))
1434                        break;
1435                ret = aio_setup_single_vector(kiocb);
1436                if (ret)
1437                        break;
1438                ret = -EINVAL;
1439                if (file->f_op->aio_read)
1440                        kiocb->ki_retry = aio_rw_vect_retry;
1441                break;
1442        case IOCB_CMD_PWRITE:
1443                ret = -EBADF;
1444                if (unlikely(!(file->f_mode & FMODE_WRITE)))
1445                        break;
1446                ret = -EFAULT;
1447                if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
1448                        kiocb->ki_left)))
1449                        break;
1450                ret = security_file_permission(file, MAY_WRITE);
1451                if (unlikely(ret))
1452                        break;
1453                ret = aio_setup_single_vector(kiocb);
1454                if (ret)
1455                        break;
1456                ret = -EINVAL;
1457                if (file->f_op->aio_write)
1458                        kiocb->ki_retry = aio_rw_vect_retry;
1459                break;
1460        case IOCB_CMD_PREADV:
1461                ret = -EBADF;
1462                if (unlikely(!(file->f_mode & FMODE_READ)))
1463                        break;
1464                ret = security_file_permission(file, MAY_READ);
1465                if (unlikely(ret))
1466                        break;
1467                ret = aio_setup_vectored_rw(READ, kiocb);
1468                if (ret)
1469                        break;
1470                ret = -EINVAL;
1471                if (file->f_op->aio_read)
1472                        kiocb->ki_retry = aio_rw_vect_retry;
1473                break;
1474        case IOCB_CMD_PWRITEV:
1475                ret = -EBADF;
1476                if (unlikely(!(file->f_mode & FMODE_WRITE)))
1477                        break;
1478                ret = security_file_permission(file, MAY_WRITE);
1479                if (unlikely(ret))
1480                        break;
1481                ret = aio_setup_vectored_rw(WRITE, kiocb);
1482                if (ret)
1483                        break;
1484                ret = -EINVAL;
1485                if (file->f_op->aio_write)
1486                        kiocb->ki_retry = aio_rw_vect_retry;
1487                break;
1488        case IOCB_CMD_FDSYNC:
1489                ret = -EINVAL;
1490                if (file->f_op->aio_fsync)
1491                        kiocb->ki_retry = aio_fdsync;
1492                break;
1493        case IOCB_CMD_FSYNC:
1494                ret = -EINVAL;
1495                if (file->f_op->aio_fsync)
1496                        kiocb->ki_retry = aio_fsync;
1497                break;
1498        default:
1499                dprintk("EINVAL: io_submit: no operation provided\n");
1500                ret = -EINVAL;
1501        }
1502
1503        if (!kiocb->ki_retry)
1504                return ret;
1505
1506        return 0;
1507}
1508
1509/*
1510 * aio_wake_function:
1511 *      wait queue callback function for aio notification,
1512 *      Simply triggers a retry of the operation via kick_iocb.
1513 *
1514 *      This callback is specified in the wait queue entry in
1515 *      a kiocb.
1516 *
1517 * Note:
1518 * This routine is executed with the wait queue lock held.
1519 * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
1520 * the ioctx lock inside the wait queue lock. This is safe
1521 * because this callback isn't used for wait queues which
1522 * are nested inside ioctx lock (i.e. ctx->wait)
1523 */
1524static int aio_wake_function(wait_queue_t *wait, unsigned mode,
1525                             int sync, void *key)
1526{
1527        struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
1528
1529        list_del_init(&wait->task_list);
1530        kick_iocb(iocb);
1531        return 1;
1532}
1533
1534static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1535                         struct iocb *iocb)
1536{
1537        struct kiocb *req;
1538        struct file *file;
1539        ssize_t ret;
1540
1541        /* enforce forwards compatibility on users */
1542        if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
1543                pr_debug("EINVAL: io_submit: reserve field set\n");
1544                return -EINVAL;
1545        }
1546
1547        /* prevent overflows */
1548        if (unlikely(
1549            (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
1550            (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
1551            ((ssize_t)iocb->aio_nbytes < 0)
1552           )) {
1553                pr_debug("EINVAL: io_submit: overflow check\n");
1554                return -EINVAL;
1555        }
1556
1557        file = fget(iocb->aio_fildes);
1558        if (unlikely(!file))
1559                return -EBADF;
1560
1561        req = aio_get_req(ctx);         /* returns with 2 references to req */
1562        if (unlikely(!req)) {
1563                fput(file);
1564                return -EAGAIN;
1565        }
1566        req->ki_filp = file;
1567        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1568                /*
1569                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1570                 * instance of the file* now. The file descriptor must be
1571                 * an eventfd() fd, and will be signaled for each completed
1572                 * event using the eventfd_signal() function.
1573                 */
1574                req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
1575                if (IS_ERR(req->ki_eventfd)) {
1576                        ret = PTR_ERR(req->ki_eventfd);
1577                        req->ki_eventfd = NULL;
1578                        goto out_put_req;
1579                }
1580        }
1581
1582        ret = put_user(req->ki_key, &user_iocb->aio_key);
1583        if (unlikely(ret)) {
1584                dprintk("EFAULT: aio_key\n");
1585                goto out_put_req;
1586        }
1587
1588        req->ki_obj.user = user_iocb;
1589        req->ki_user_data = iocb->aio_data;
1590        req->ki_pos = iocb->aio_offset;
1591
1592        req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
1593        req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1594        req->ki_opcode = iocb->aio_lio_opcode;
1595        init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
1596        INIT_LIST_HEAD(&req->ki_wait.task_list);
1597
1598        ret = aio_setup_iocb(req);
1599
1600        if (ret)
1601                goto out_put_req;
1602
1603        spin_lock_irq(&ctx->ctx_lock);
1604        aio_run_iocb(req);
1605        if (!list_empty(&ctx->run_list)) {
1606                /* drain the run list */
1607                while (__aio_run_iocbs(ctx))
1608                        ;
1609        }
1610        spin_unlock_irq(&ctx->ctx_lock);
1611        aio_put_req(req);       /* drop extra ref to req */
1612        return 0;
1613
1614out_put_req:
1615        aio_put_req(req);       /* drop extra ref to req */
1616        aio_put_req(req);       /* drop i/o ref to req */
1617        return ret;
1618}
1619
1620/* sys_io_submit:
1621 *      Queue the nr iocbs pointed to by iocbpp for processing.  Returns
1622 *      the number of iocbs queued.  May return -EINVAL if the aio_context
1623 *      specified by ctx_id is invalid, if nr is < 0, if the iocb at
1624 *      *iocbpp[0] is not properly initialized, or if the operation specified
1625 *      is invalid for the file descriptor in the iocb.  May fail with
1626 *      -EFAULT if any of the data structures point to invalid data.  May
1627 *      fail with -EBADF if the file descriptor specified in the first
1628 *      iocb is invalid.  May fail with -EAGAIN if insufficient resources
1629 *      are available to queue any iocbs.  Will return 0 if nr is 0.  Will
1630 *      fail with -ENOSYS if not implemented.
1631 */
1632SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1633                struct iocb __user * __user *, iocbpp)
1634{
1635        struct kioctx *ctx;
1636        long ret = 0;
1637        int i;
1638
1639        if (unlikely(nr < 0))
1640                return -EINVAL;
1641
1642        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
1643                return -EFAULT;
1644
1645        ctx = lookup_ioctx(ctx_id);
1646        if (unlikely(!ctx)) {
1647                pr_debug("EINVAL: io_submit: invalid context id\n");
1648                return -EINVAL;
1649        }
1650
1651        /*
1652         * AKPM: should this return a partial result if some of the IOs were
1653         * successfully submitted?
1654         */
1655        for (i = 0; i < nr; i++) {
1656                struct iocb __user *user_iocb;
1657                struct iocb tmp;
1658
1659                if (unlikely(__get_user(user_iocb, iocbpp + i))) {
1660                        ret = -EFAULT;
1661                        break;
1662                }
1663
1664                if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
1665                        ret = -EFAULT;
1666                        break;
1667                }
1668
1669                ret = io_submit_one(ctx, user_iocb, &tmp);
1670                if (ret)
1671                        break;
1672        }
1673
1674        put_ioctx(ctx);
1675        return i ? i : ret;
1676}
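
/*
 * Editor's sketch, not part of the original file: submitting a single read
 * through the raw syscall that the wrapper above implements.  The context is
 * assumed to have been created earlier with syscall(__NR_io_setup, 128, &ctx);
 * the fd and buffer are illustrative and error handling is minimal.  On
 * success io_submit() returns the number of iocbs queued, which can be less
 * than nr if a later iocb in the array is rejected.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static long submit_one_read(aio_context_t ctx, struct iocb *cb,
                            int fd, void *buf, size_t len)
{
        struct iocb *list[1] = { cb };

        memset(cb, 0, sizeof(*cb));
        cb->aio_lio_opcode = IOCB_CMD_PREAD;
        cb->aio_fildes     = fd;
        cb->aio_buf        = (unsigned long)buf;
        cb->aio_nbytes     = len;
        cb->aio_offset     = 0;

        /* the kernel copies *cb during the call, but echoes the pointer value
         * back in io_event.obj and expects it again for io_cancel() */
        return syscall(__NR_io_submit, ctx, 1, list);
}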
1677
1678/* lookup_kiocb
1679 *      Finds a given iocb for cancellation.
1680 */
1681static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
1682                                  u32 key)
1683{
1684        struct list_head *pos;
1685
1686        assert_spin_locked(&ctx->ctx_lock);
1687
1688        /* TODO: use a hash or array, this sucks. */
1689        list_for_each(pos, &ctx->active_reqs) {
1690                struct kiocb *kiocb = list_kiocb(pos);
1691                if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
1692                        return kiocb;
1693        }
1694        return NULL;
1695}
1696
1697/* sys_io_cancel:
1698 *      Attempts to cancel an iocb previously passed to io_submit.  If
1699 *      the operation is successfully cancelled, the resulting event is
1700 *      copied into the memory pointed to by result without being placed
1701 *      into the completion queue and 0 is returned.  May fail with
1702 *      -EFAULT if any of the data structures pointed to are invalid.
1703 *      May fail with -EINVAL if aio_context specified by ctx_id is
1704 *      invalid.  May fail with -EAGAIN if the iocb specified was not
1705 *      cancelled.  Will fail with -ENOSYS if not implemented.
1706 */
1707SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1708                struct io_event __user *, result)
1709{
1710        int (*cancel)(struct kiocb *iocb, struct io_event *res);
1711        struct kioctx *ctx;
1712        struct kiocb *kiocb;
1713        u32 key;
1714        int ret;
1715
1716        ret = get_user(key, &iocb->aio_key);
1717        if (unlikely(ret))
1718                return -EFAULT;
1719
1720        ctx = lookup_ioctx(ctx_id);
1721        if (unlikely(!ctx))
1722                return -EINVAL;
1723
1724        spin_lock_irq(&ctx->ctx_lock);
1725        ret = -EAGAIN;
1726        kiocb = lookup_kiocb(ctx, iocb, key);
1727        if (kiocb && kiocb->ki_cancel) {
1728                cancel = kiocb->ki_cancel;
1729                kiocb->ki_users++;
1730                kiocbSetCancelled(kiocb);
1731        } else
1732                cancel = NULL;
1733        spin_unlock_irq(&ctx->ctx_lock);
1734
1735        if (cancel) {
1736                struct io_event tmp;
1737                pr_debug("calling cancel\n");
1738                memset(&tmp, 0, sizeof(tmp));
1739                tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
1740                tmp.data = kiocb->ki_user_data;
1741                ret = cancel(kiocb, &tmp);
1742                if (!ret) {
1743                        /* Cancellation succeeded -- copy the result
1744                         * into the user's buffer.
1745                         */
1746                        if (copy_to_user(result, &tmp, sizeof(tmp)))
1747                                ret = -EFAULT;
1748                }
1749        } else
1750                ret = -EINVAL;
1751
1752        put_ioctx(ctx);
1753
1754        return ret;
1755}
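
/*
 * Editor's sketch, not part of the original file: cancellation is keyed on
 * the same userspace iocb pointer that was passed to io_submit() plus the
 * aio_key the kernel wrote back into it, so the submitted iocb must stay
 * alive and unmodified until the request completes or is cancelled.  The
 * helper name is illustrative.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cancel_request(aio_context_t ctx, struct iocb *submitted_cb)
{
        struct io_event ev;

        /* 0 on success with the result copied into ev; -1 with errno set to
         * EAGAIN if the request could not be cancelled */
        return syscall(__NR_io_cancel, ctx, submitted_cb, &ev);
}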
1756
1757/* io_getevents:
1758 *      Attempts to read at least min_nr events and up to nr events from
1759 *      the completion queue for the aio_context specified by ctx_id.  May
1760 *      fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
1761 *      if nr is out of range, if when is out of range.  May fail with
1762 *      if nr is out of range, or if when is out of range.  May fail with
1763 *      -EFAULT if any of the memory specified is invalid.  May return
1764 *      by when has elapsed, where when == NULL specifies an infinite
1765 *      timeout.  Note that the timeout pointed to by when is relative and
1766 *      will be updated if not NULL and the operation blocks.  Will fail
1767 *      with -ENOSYS if not implemented.
1768 */
1769SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1770                long, min_nr,
1771                long, nr,
1772                struct io_event __user *, events,
1773                struct timespec __user *, timeout)
1774{
1775        struct kioctx *ioctx = lookup_ioctx(ctx_id);
1776        long ret = -EINVAL;
1777
1778        if (likely(ioctx)) {
1779                if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
1780                        ret = read_events(ioctx, min_nr, nr, events, timeout);
1781                put_ioctx(ioctx);
1782        }
1783
1784        asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
1785        return ret;
1786}
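
/*
 * Editor's sketch, not part of the original file: reaping completions with a
 * relative two-second timeout, matching the semantics documented above (a
 * NULL timeout would block until at least min_nr events arrive).  min_nr is
 * fixed at 1 here purely for illustration.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

static long reap_events(aio_context_t ctx, struct io_event *events, long nr)
{
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

        /* returns the number of events copied out, possibly 0 on timeout */
        return syscall(__NR_io_getevents, ctx, 1, nr, events, &ts);
}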
1787