linux/fs/aio.c
   1/*
   2 *      An async IO implementation for Linux
   3 *      Written by Benjamin LaHaise <bcrl@kvack.org>
   4 *
   5 *      Implements an efficient asynchronous io interface.
   6 *
   7 *      Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
   8 *
   9 *      See ../COPYING for licensing terms.
  10 */
  11#include <linux/kernel.h>
  12#include <linux/init.h>
  13#include <linux/errno.h>
  14#include <linux/time.h>
  15#include <linux/aio_abi.h>
  16#include <linux/module.h>
  17#include <linux/syscalls.h>
  18#include <linux/backing-dev.h>
  19#include <linux/uio.h>
  20
  21#define DEBUG 0
  22
  23#include <linux/sched.h>
  24#include <linux/fs.h>
  25#include <linux/file.h>
  26#include <linux/mm.h>
  27#include <linux/mman.h>
  28#include <linux/mmu_context.h>
  29#include <linux/slab.h>
  30#include <linux/timer.h>
  31#include <linux/aio.h>
  32#include <linux/highmem.h>
  33#include <linux/workqueue.h>
  34#include <linux/security.h>
  35#include <linux/eventfd.h>
  36#include <linux/blkdev.h>
  37#include <linux/compat.h>
  38
  39#include <asm/kmap_types.h>
  40#include <asm/uaccess.h>
  41
  42#if DEBUG > 1
  43#define dprintk         printk
  44#else
  45#define dprintk(x...)   do { ; } while (0)
  46#endif
  47
  48/*------ sysctl variables----*/
  49static DEFINE_SPINLOCK(aio_nr_lock);
  50unsigned long aio_nr;           /* current system wide number of aio requests */
  51unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
  52/*----end sysctl variables---*/
  53
  54static struct kmem_cache        *kiocb_cachep;
  55static struct kmem_cache        *kioctx_cachep;
  56
  57static struct workqueue_struct *aio_wq;
  58
  59/* Used for rare fput completion. */
  60static void aio_fput_routine(struct work_struct *);
  61static DECLARE_WORK(fput_work, aio_fput_routine);
  62
  63static DEFINE_SPINLOCK(fput_lock);
  64static LIST_HEAD(fput_head);
  65
  66static void aio_kick_handler(struct work_struct *);
  67static void aio_queue_work(struct kioctx *);
  68
  69/* aio_setup
  70 *      Creates the slab caches used by the aio routines, panic on
  71 *      failure as this is done early during the boot sequence.
  72 */
  73static int __init aio_setup(void)
  74{
  75        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
   76        kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
  77
  78        aio_wq = alloc_workqueue("aio", 0, 1);  /* used to limit concurrency */
  79        BUG_ON(!aio_wq);
  80
  81        pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
  82
  83        return 0;
  84}
  85__initcall(aio_setup);
  86
  87static void aio_free_ring(struct kioctx *ctx)
  88{
  89        struct aio_ring_info *info = &ctx->ring_info;
  90        long i;
  91
  92        for (i=0; i<info->nr_pages; i++)
  93                put_page(info->ring_pages[i]);
  94
  95        if (info->mmap_size) {
  96                down_write(&ctx->mm->mmap_sem);
  97                do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
  98                up_write(&ctx->mm->mmap_sem);
  99        }
 100
 101        if (info->ring_pages && info->ring_pages != info->internal_pages)
 102                kfree(info->ring_pages);
 103        info->ring_pages = NULL;
 104        info->nr = 0;
 105}
 106
 107static int aio_setup_ring(struct kioctx *ctx)
 108{
 109        struct aio_ring *ring;
 110        struct aio_ring_info *info = &ctx->ring_info;
 111        unsigned nr_events = ctx->max_reqs;
 112        unsigned long size;
 113        int nr_pages;
 114
 115        /* Compensate for the ring buffer's head/tail overlap entry */
 116        nr_events += 2; /* 1 is required, 2 for good luck */
 117
 118        size = sizeof(struct aio_ring);
 119        size += sizeof(struct io_event) * nr_events;
 120        nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
 121
 122        if (nr_pages < 0)
 123                return -EINVAL;
 124
 125        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
 126
 127        info->nr = 0;
 128        info->ring_pages = info->internal_pages;
 129        if (nr_pages > AIO_RING_PAGES) {
 130                info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 131                if (!info->ring_pages)
 132                        return -ENOMEM;
 133        }
 134
 135        info->mmap_size = nr_pages * PAGE_SIZE;
 136        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
 137        down_write(&ctx->mm->mmap_sem);
 138        info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
 139                                  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
 140                                  0);
 141        if (IS_ERR((void *)info->mmap_base)) {
 142                up_write(&ctx->mm->mmap_sem);
 143                info->mmap_size = 0;
 144                aio_free_ring(ctx);
 145                return -EAGAIN;
 146        }
 147
 148        dprintk("mmap address: 0x%08lx\n", info->mmap_base);
 149        info->nr_pages = get_user_pages(current, ctx->mm,
 150                                        info->mmap_base, nr_pages, 
 151                                        1, 0, info->ring_pages, NULL);
 152        up_write(&ctx->mm->mmap_sem);
 153
 154        if (unlikely(info->nr_pages != nr_pages)) {
 155                aio_free_ring(ctx);
 156                return -EAGAIN;
 157        }
 158
 159        ctx->user_id = info->mmap_base;
 160
 161        info->nr = nr_events;           /* trusted copy */
 162
 163        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
 164        ring->nr = nr_events;   /* user copy */
 165        ring->id = ctx->user_id;
 166        ring->head = ring->tail = 0;
 167        ring->magic = AIO_RING_MAGIC;
 168        ring->compat_features = AIO_RING_COMPAT_FEATURES;
 169        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 170        ring->header_length = sizeof(struct aio_ring);
 171        kunmap_atomic(ring, KM_USER0);
 172
 173        return 0;
 174}
 175
 176
 177/* aio_ring_event: returns a pointer to the event at the given index from
 178 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
 179 */
 180#define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / sizeof(struct io_event))
 181#define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 182#define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 183
 184#define aio_ring_event(info, nr, km) ({                                 \
 185        unsigned pos = (nr) + AIO_EVENTS_OFFSET;                        \
 186        struct io_event *__event;                                       \
 187        __event = kmap_atomic(                                          \
 188                        (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
 189        __event += pos % AIO_EVENTS_PER_PAGE;                           \
 190        __event;                                                        \
 191})
 192
 193#define put_aio_ring_event(event, km) do {      \
 194        struct io_event *__event = (event);     \
 195        (void)__event;                          \
 196        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 197} while(0)
 198
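/*
 * Illustrative sketch (editor's example, not part of fs/aio.c): with the
 * usual 4096-byte page, sizeof(struct aio_ring) and sizeof(struct io_event)
 * are both 32 bytes, so AIO_EVENTS_PER_PAGE is 128, AIO_EVENTS_FIRST_PAGE is
 * 127 and AIO_EVENTS_OFFSET is 1.  The arithmetic inside aio_ring_event()
 * then maps an event index onto a (ring page, slot) pair like this:
 */
static inline void aio_event_slot_example(unsigned nr, unsigned *page,
                                          unsigned *slot)
{
        unsigned pos = nr + AIO_EVENTS_OFFSET;  /* skip the aio_ring header */

        *page = pos / AIO_EVENTS_PER_PAGE;      /* which ring page */
        *slot = pos % AIO_EVENTS_PER_PAGE;      /* io_event slot in that page */
        /* e.g. nr == 126 -> page 0, slot 127; nr == 127 -> page 1, slot 0 */
}
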
 199static void ctx_rcu_free(struct rcu_head *head)
 200{
 201        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
 202        unsigned nr_events = ctx->max_reqs;
 203
 204        kmem_cache_free(kioctx_cachep, ctx);
 205
 206        if (nr_events) {
 207                spin_lock(&aio_nr_lock);
 208                BUG_ON(aio_nr - nr_events > aio_nr);
 209                aio_nr -= nr_events;
 210                spin_unlock(&aio_nr_lock);
 211        }
 212}
 213
 214/* __put_ioctx
 215 *      Called when the last user of an aio context has gone away,
 216 *      and the struct needs to be freed.
 217 */
 218static void __put_ioctx(struct kioctx *ctx)
 219{
 220        BUG_ON(ctx->reqs_active);
 221
 222        cancel_delayed_work(&ctx->wq);
 223        cancel_work_sync(&ctx->wq.work);
 224        aio_free_ring(ctx);
 225        mmdrop(ctx->mm);
 226        ctx->mm = NULL;
 227        pr_debug("__put_ioctx: freeing %p\n", ctx);
 228        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 229}
 230
 231static inline int try_get_ioctx(struct kioctx *kioctx)
 232{
 233        return atomic_inc_not_zero(&kioctx->users);
 234}
 235
 236static inline void put_ioctx(struct kioctx *kioctx)
 237{
 238        BUG_ON(atomic_read(&kioctx->users) <= 0);
 239        if (unlikely(atomic_dec_and_test(&kioctx->users)))
 240                __put_ioctx(kioctx);
 241}
 242
 243/* ioctx_alloc
 244 *      Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 245 */
 246static struct kioctx *ioctx_alloc(unsigned nr_events)
 247{
 248        struct mm_struct *mm;
 249        struct kioctx *ctx;
 250        int did_sync = 0;
 251
 252        /* Prevent overflows */
 253        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
 254            (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
 255                pr_debug("ENOMEM: nr_events too high\n");
 256                return ERR_PTR(-EINVAL);
 257        }
 258
 259        if ((unsigned long)nr_events > aio_max_nr)
 260                return ERR_PTR(-EAGAIN);
 261
 262        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
 263        if (!ctx)
 264                return ERR_PTR(-ENOMEM);
 265
 266        ctx->max_reqs = nr_events;
 267        mm = ctx->mm = current->mm;
 268        atomic_inc(&mm->mm_count);
 269
 270        atomic_set(&ctx->users, 2);
 271        spin_lock_init(&ctx->ctx_lock);
 272        spin_lock_init(&ctx->ring_info.ring_lock);
 273        init_waitqueue_head(&ctx->wait);
 274
 275        INIT_LIST_HEAD(&ctx->active_reqs);
 276        INIT_LIST_HEAD(&ctx->run_list);
 277        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 278
 279        if (aio_setup_ring(ctx) < 0)
 280                goto out_freectx;
 281
 282        /* limit the number of system wide aios */
 283        do {
 284                spin_lock_bh(&aio_nr_lock);
 285                if (aio_nr + nr_events > aio_max_nr ||
 286                    aio_nr + nr_events < aio_nr)
 287                        ctx->max_reqs = 0;
 288                else
 289                        aio_nr += ctx->max_reqs;
 290                spin_unlock_bh(&aio_nr_lock);
 291                if (ctx->max_reqs || did_sync)
 292                        break;
 293
 294                /* wait for rcu callbacks to have completed before giving up */
 295                synchronize_rcu();
 296                did_sync = 1;
 297                ctx->max_reqs = nr_events;
 298        } while (1);
 299
 300        if (ctx->max_reqs == 0)
 301                goto out_cleanup;
 302
 303        /* now link into global list. */
 304        spin_lock(&mm->ioctx_lock);
 305        hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
 306        spin_unlock(&mm->ioctx_lock);
 307
 308        dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
 309                ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
 310        return ctx;
 311
 312out_cleanup:
 313        __put_ioctx(ctx);
 314        return ERR_PTR(-EAGAIN);
 315
 316out_freectx:
 317        mmdrop(mm);
 318        kmem_cache_free(kioctx_cachep, ctx);
 319        ctx = ERR_PTR(-ENOMEM);
 320
 321        dprintk("aio: error allocating ioctx %p\n", ctx);
 322        return ctx;
 323}
 324
 325/* aio_cancel_all
 326 *      Cancels all outstanding aio requests on an aio context.  Used 
 327 *      when the processes owning a context have all exited to encourage 
 328 *      the rapid destruction of the kioctx.
 329 */
 330static void aio_cancel_all(struct kioctx *ctx)
 331{
 332        int (*cancel)(struct kiocb *, struct io_event *);
 333        struct io_event res;
 334        spin_lock_irq(&ctx->ctx_lock);
 335        ctx->dead = 1;
 336        while (!list_empty(&ctx->active_reqs)) {
 337                struct list_head *pos = ctx->active_reqs.next;
 338                struct kiocb *iocb = list_kiocb(pos);
 339                list_del_init(&iocb->ki_list);
 340                cancel = iocb->ki_cancel;
 341                kiocbSetCancelled(iocb);
 342                if (cancel) {
 343                        iocb->ki_users++;
 344                        spin_unlock_irq(&ctx->ctx_lock);
 345                        cancel(iocb, &res);
 346                        spin_lock_irq(&ctx->ctx_lock);
 347                }
 348        }
 349        spin_unlock_irq(&ctx->ctx_lock);
 350}
 351
 352static void wait_for_all_aios(struct kioctx *ctx)
 353{
 354        struct task_struct *tsk = current;
 355        DECLARE_WAITQUEUE(wait, tsk);
 356
 357        spin_lock_irq(&ctx->ctx_lock);
 358        if (!ctx->reqs_active)
 359                goto out;
 360
 361        add_wait_queue(&ctx->wait, &wait);
 362        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 363        while (ctx->reqs_active) {
 364                spin_unlock_irq(&ctx->ctx_lock);
 365                io_schedule();
 366                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 367                spin_lock_irq(&ctx->ctx_lock);
 368        }
 369        __set_task_state(tsk, TASK_RUNNING);
 370        remove_wait_queue(&ctx->wait, &wait);
 371
 372out:
 373        spin_unlock_irq(&ctx->ctx_lock);
 374}
 375
 376/* wait_on_sync_kiocb:
 377 *      Waits on the given sync kiocb to complete.
 378 */
 379ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 380{
 381        while (iocb->ki_users) {
 382                set_current_state(TASK_UNINTERRUPTIBLE);
 383                if (!iocb->ki_users)
 384                        break;
 385                io_schedule();
 386        }
 387        __set_current_state(TASK_RUNNING);
 388        return iocb->ki_user_data;
 389}
 390EXPORT_SYMBOL(wait_on_sync_kiocb);
 391
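/*
 * Minimal sketch of how a sync kiocb is consumed (modelled on the
 * do_sync_read() path in fs/read_write.c of this era, simplified here):
 * the caller builds a kiocb on its own stack, calls the file's ->aio_read,
 * and only sleeps in wait_on_sync_kiocb() if the method went asynchronous
 * by returning -EIOCBQUEUED.
 */
static ssize_t sync_read_sketch(struct file *filp, char __user *buf,
                                size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);  /* ki_users = 1, ki_obj.tsk = current */
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;
        kiocb.ki_nbytes = len;

        ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (ret == -EIOCBQUEUED)
                ret = wait_on_sync_kiocb(&kiocb);  /* returns ki_user_data */

        *ppos = kiocb.ki_pos;
        return ret;
}
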
 392/* exit_aio: called when the last user of mm goes away.  At this point, 
  393 * there is no way for any new requests to be submitted or any of the 
 394 * io_* syscalls to be called on the context.  However, there may be 
 395 * outstanding requests which hold references to the context; as they 
 396 * go away, they will call put_ioctx and release any pinned memory
 397 * associated with the request (held via struct page * references).
 398 */
 399void exit_aio(struct mm_struct *mm)
 400{
 401        struct kioctx *ctx;
 402
 403        while (!hlist_empty(&mm->ioctx_list)) {
 404                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
 405                hlist_del_rcu(&ctx->list);
 406
 407                aio_cancel_all(ctx);
 408
 409                wait_for_all_aios(ctx);
 410                /*
 411                 * Ensure we don't leave the ctx on the aio_wq
 412                 */
 413                cancel_work_sync(&ctx->wq.work);
 414
 415                if (1 != atomic_read(&ctx->users))
 416                        printk(KERN_DEBUG
 417                                "exit_aio:ioctx still alive: %d %d %d\n",
 418                                atomic_read(&ctx->users), ctx->dead,
 419                                ctx->reqs_active);
 420                put_ioctx(ctx);
 421        }
 422}
 423
 424/* aio_get_req
 425 *      Allocate a slot for an aio request.  Increments the users count
 426 * of the kioctx so that the kioctx stays around until all requests are
 427 * complete.  Returns NULL if no requests are free.
 428 *
 429 * Returns with kiocb->users set to 2.  The io submit code path holds
 430 * an extra reference while submitting the i/o.
 431 * This prevents races between the aio code path referencing the
 432 * req (after submitting it) and aio_complete() freeing the req.
 433 */
 434static struct kiocb *__aio_get_req(struct kioctx *ctx)
 435{
 436        struct kiocb *req = NULL;
 437
 438        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
 439        if (unlikely(!req))
 440                return NULL;
 441
 442        req->ki_flags = 0;
 443        req->ki_users = 2;
 444        req->ki_key = 0;
 445        req->ki_ctx = ctx;
 446        req->ki_cancel = NULL;
 447        req->ki_retry = NULL;
 448        req->ki_dtor = NULL;
 449        req->private = NULL;
 450        req->ki_iovec = NULL;
 451        INIT_LIST_HEAD(&req->ki_run_list);
 452        req->ki_eventfd = NULL;
 453
 454        return req;
 455}
 456
 457/*
 458 * struct kiocb's are allocated in batches to reduce the number of
 459 * times the ctx lock is acquired and released.
 460 */
 461#define KIOCB_BATCH_SIZE        32L
 462struct kiocb_batch {
 463        struct list_head head;
 464        long count; /* number of requests left to allocate */
 465};
 466
 467static void kiocb_batch_init(struct kiocb_batch *batch, long total)
 468{
 469        INIT_LIST_HEAD(&batch->head);
 470        batch->count = total;
 471}
 472
 473static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 474{
 475        struct kiocb *req, *n;
 476
 477        if (list_empty(&batch->head))
 478                return;
 479
 480        spin_lock_irq(&ctx->ctx_lock);
 481        list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
 482                list_del(&req->ki_batch);
 483                list_del(&req->ki_list);
 484                kmem_cache_free(kiocb_cachep, req);
 485                ctx->reqs_active--;
 486        }
 487        if (unlikely(!ctx->reqs_active && ctx->dead))
 488                wake_up_all(&ctx->wait);
 489        spin_unlock_irq(&ctx->ctx_lock);
 490}
 491
 492/*
 493 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 494 * context lock a lot during setup.
 495 */
 496static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 497{
 498        unsigned short allocated, to_alloc;
 499        long avail;
 500        bool called_fput = false;
 501        struct kiocb *req, *n;
 502        struct aio_ring *ring;
 503
 504        to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
 505        for (allocated = 0; allocated < to_alloc; allocated++) {
 506                req = __aio_get_req(ctx);
 507                if (!req)
 508                        /* allocation failed, go with what we've got */
 509                        break;
 510                list_add(&req->ki_batch, &batch->head);
 511        }
 512
 513        if (allocated == 0)
 514                goto out;
 515
 516retry:
 517        spin_lock_irq(&ctx->ctx_lock);
 518        ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
 519
 520        avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
 521        BUG_ON(avail < 0);
 522        if (avail == 0 && !called_fput) {
 523                /*
 524                 * Handle a potential starvation case.  It is possible that
 525                 * we hold the last reference on a struct file, causing us
 526                 * to delay the final fput to non-irq context.  In this case,
 527                 * ctx->reqs_active is artificially high.  Calling the fput
 528                 * routine here may free up a slot in the event completion
 529                 * ring, allowing this allocation to succeed.
 530                 */
 531                kunmap_atomic(ring);
 532                spin_unlock_irq(&ctx->ctx_lock);
 533                aio_fput_routine(NULL);
 534                called_fput = true;
 535                goto retry;
 536        }
 537
 538        if (avail < allocated) {
 539                /* Trim back the number of requests. */
 540                list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
 541                        list_del(&req->ki_batch);
 542                        kmem_cache_free(kiocb_cachep, req);
 543                        if (--allocated <= avail)
 544                                break;
 545                }
 546        }
 547
 548        batch->count -= allocated;
 549        list_for_each_entry(req, &batch->head, ki_batch) {
 550                list_add(&req->ki_list, &ctx->active_reqs);
 551                ctx->reqs_active++;
 552        }
 553
 554        kunmap_atomic(ring);
 555        spin_unlock_irq(&ctx->ctx_lock);
 556
 557out:
 558        return allocated;
 559}
 560
 561static inline struct kiocb *aio_get_req(struct kioctx *ctx,
 562                                        struct kiocb_batch *batch)
 563{
 564        struct kiocb *req;
 565
 566        if (list_empty(&batch->head))
 567                if (kiocb_batch_refill(ctx, batch) == 0)
 568                        return NULL;
 569        req = list_first_entry(&batch->head, struct kiocb, ki_batch);
 570        list_del(&req->ki_batch);
 571        return req;
 572}
 573
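/*
 * Condensed sketch of the intended calling pattern (the real consumer,
 * do_io_submit(), lies beyond this excerpt): initialise one batch for the
 * whole submission, pull kiocbs from it one at a time, then hand back
 * whatever was prefilled but never used.
 */
static long submit_batch_sketch(struct kioctx *ctx, long nr)
{
        struct kiocb_batch batch;
        long i;

        kiocb_batch_init(&batch, nr);           /* nr = iocbs to submit */
        for (i = 0; i < nr; i++) {
                struct kiocb *req = aio_get_req(ctx, &batch);

                if (!req)
                        break;                  /* ring full or -ENOMEM */
                /* ... set up req and run it ... */
        }
        kiocb_batch_free(ctx, &batch);          /* release unused kiocbs */
        return i;
}
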
 574static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 575{
 576        assert_spin_locked(&ctx->ctx_lock);
 577
 578        if (req->ki_eventfd != NULL)
 579                eventfd_ctx_put(req->ki_eventfd);
 580        if (req->ki_dtor)
 581                req->ki_dtor(req);
 582        if (req->ki_iovec != &req->ki_inline_vec)
 583                kfree(req->ki_iovec);
 584        kmem_cache_free(kiocb_cachep, req);
 585        ctx->reqs_active--;
 586
 587        if (unlikely(!ctx->reqs_active && ctx->dead))
 588                wake_up_all(&ctx->wait);
 589}
 590
 591static void aio_fput_routine(struct work_struct *data)
 592{
 593        spin_lock_irq(&fput_lock);
 594        while (likely(!list_empty(&fput_head))) {
 595                struct kiocb *req = list_kiocb(fput_head.next);
 596                struct kioctx *ctx = req->ki_ctx;
 597
 598                list_del(&req->ki_list);
 599                spin_unlock_irq(&fput_lock);
 600
 601                /* Complete the fput(s) */
 602                if (req->ki_filp != NULL)
 603                        fput(req->ki_filp);
 604
 605                /* Link the iocb into the context's free list */
 606                rcu_read_lock();
 607                spin_lock_irq(&ctx->ctx_lock);
 608                really_put_req(ctx, req);
 609                /*
 610                 * at that point ctx might've been killed, but actual
 611                 * freeing is RCU'd
 612                 */
 613                spin_unlock_irq(&ctx->ctx_lock);
 614                rcu_read_unlock();
 615
 616                spin_lock_irq(&fput_lock);
 617        }
 618        spin_unlock_irq(&fput_lock);
 619}
 620
 621/* __aio_put_req
 622 *      Returns true if this put was the last user of the request.
 623 */
 624static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 625{
 626        dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 627                req, atomic_long_read(&req->ki_filp->f_count));
 628
 629        assert_spin_locked(&ctx->ctx_lock);
 630
 631        req->ki_users--;
 632        BUG_ON(req->ki_users < 0);
 633        if (likely(req->ki_users))
 634                return 0;
 635        list_del(&req->ki_list);                /* remove from active_reqs */
 636        req->ki_cancel = NULL;
 637        req->ki_retry = NULL;
 638
 639        /*
 640         * Try to optimize the aio and eventfd file* puts, by avoiding to
 641         * schedule work in case it is not final fput() time. In normal cases,
 642         * we would not be holding the last reference to the file*, so
 643         * this function will be executed w/out any aio kthread wakeup.
 644         */
 645        if (unlikely(!fput_atomic(req->ki_filp))) {
 646                spin_lock(&fput_lock);
 647                list_add(&req->ki_list, &fput_head);
 648                spin_unlock(&fput_lock);
 649                schedule_work(&fput_work);
 650        } else {
 651                req->ki_filp = NULL;
 652                really_put_req(ctx, req);
 653        }
 654        return 1;
 655}
 656
 657/* aio_put_req
 658 *      Returns true if this put was the last user of the kiocb,
 659 *      false if the request is still in use.
 660 */
 661int aio_put_req(struct kiocb *req)
 662{
 663        struct kioctx *ctx = req->ki_ctx;
 664        int ret;
 665        spin_lock_irq(&ctx->ctx_lock);
 666        ret = __aio_put_req(ctx, req);
 667        spin_unlock_irq(&ctx->ctx_lock);
 668        return ret;
 669}
 670EXPORT_SYMBOL(aio_put_req);
 671
 672static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 673{
 674        struct mm_struct *mm = current->mm;
 675        struct kioctx *ctx, *ret = NULL;
 676        struct hlist_node *n;
 677
 678        rcu_read_lock();
 679
 680        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
 681                /*
 682                 * RCU protects us against accessing freed memory but
 683                 * we have to be careful not to get a reference when the
 684                 * reference count already dropped to 0 (ctx->dead test
 685                 * is unreliable because of races).
 686                 */
 687                if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
 688                        ret = ctx;
 689                        break;
 690                }
 691        }
 692
 693        rcu_read_unlock();
 694        return ret;
 695}
 696
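/*
 * Sketch of the reference pairing callers rely on (io_submit, io_cancel and
 * io_getevents all follow this shape): a successful lookup_ioctx() returns
 * with the reference taken by try_get_ioctx(), which the caller must drop.
 */
static long with_ioctx_sketch(unsigned long ctx_id)
{
        struct kioctx *ioctx = lookup_ioctx(ctx_id);

        if (unlikely(!ioctx))
                return -EINVAL;
        /* ... operate on ioctx under its own locking ... */
        put_ioctx(ioctx);               /* drop the lookup reference */
        return 0;
}
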
 697/*
 698 * Queue up a kiocb to be retried. Assumes that the kiocb
 699 * has already been marked as kicked, and places it on
 700 * the retry run list for the corresponding ioctx, if it
 701 * isn't already queued. Returns 1 if it actually queued
 702 * the kiocb (to tell the caller to activate the work
 703 * queue to process it), or 0, if it found that it was
 704 * already queued.
 705 */
 706static inline int __queue_kicked_iocb(struct kiocb *iocb)
 707{
 708        struct kioctx *ctx = iocb->ki_ctx;
 709
 710        assert_spin_locked(&ctx->ctx_lock);
 711
 712        if (list_empty(&iocb->ki_run_list)) {
 713                list_add_tail(&iocb->ki_run_list,
 714                        &ctx->run_list);
 715                return 1;
 716        }
 717        return 0;
 718}
 719
 720/* aio_run_iocb
 721 *      This is the core aio execution routine. It is
 722 *      invoked both for initial i/o submission and
 723 *      subsequent retries via the aio_kick_handler.
 724 *      Expects to be invoked with iocb->ki_ctx->lock
 725 *      already held. The lock is released and reacquired
 726 *      as needed during processing.
 727 *
 728 * Calls the iocb retry method (already setup for the
 729 * iocb on initial submission) for operation specific
 730 * handling, but takes care of most of common retry
 731 * execution details for a given iocb. The retry method
 732 * needs to be non-blocking as far as possible, to avoid
 733 * holding up other iocbs waiting to be serviced by the
 734 * retry kernel thread.
 735 *
 736 * The trickier parts in this code have to do with
 737 * ensuring that only one retry instance is in progress
 738 * for a given iocb at any time. Providing that guarantee
 739 * simplifies the coding of individual aio operations as
 740 * it avoids various potential races.
 741 */
 742static ssize_t aio_run_iocb(struct kiocb *iocb)
 743{
 744        struct kioctx   *ctx = iocb->ki_ctx;
 745        ssize_t (*retry)(struct kiocb *);
 746        ssize_t ret;
 747
 748        if (!(retry = iocb->ki_retry)) {
 749                printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 750                return 0;
 751        }
 752
 753        /*
 754         * We don't want the next retry iteration for this
 755         * operation to start until this one has returned and
 756         * updated the iocb state. However, wait_queue functions
 757         * can trigger a kick_iocb from interrupt context in the
 758         * meantime, indicating that data is available for the next
 759         * iteration. We want to remember that and enable the
 760         * next retry iteration _after_ we are through with
 761         * this one.
 762         *
 763         * So, in order to be able to register a "kick", but
 764         * prevent it from being queued now, we clear the kick
 765         * flag, but make the kick code *think* that the iocb is
 766         * still on the run list until we are actually done.
 767         * When we are done with this iteration, we check if
 768         * the iocb was kicked in the meantime and if so, queue
 769         * it up afresh.
 770         */
 771
 772        kiocbClearKicked(iocb);
 773
 774        /*
 775         * This is so that aio_complete knows it doesn't need to
 776         * pull the iocb off the run list (We can't just call
 777         * INIT_LIST_HEAD because we don't want a kick_iocb to
 778         * queue this on the run list yet)
 779         */
 780        iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
 781        spin_unlock_irq(&ctx->ctx_lock);
 782
 783        /* Quit retrying if the i/o has been cancelled */
 784        if (kiocbIsCancelled(iocb)) {
 785                ret = -EINTR;
 786                aio_complete(iocb, ret, 0);
 787                /* must not access the iocb after this */
 788                goto out;
 789        }
 790
 791        /*
 792         * Now we are all set to call the retry method in async
 793         * context.
 794         */
 795        ret = retry(iocb);
 796
 797        if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
 798                /*
 799                 * There's no easy way to restart the syscall since other AIO's
 800                 * may be already running. Just fail this IO with EINTR.
 801                 */
 802                if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
 803                             ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
 804                        ret = -EINTR;
 805                aio_complete(iocb, ret, 0);
 806        }
 807out:
 808        spin_lock_irq(&ctx->ctx_lock);
 809
 810        if (-EIOCBRETRY == ret) {
 811                /*
 812                 * OK, now that we are done with this iteration
 813                 * and know that there is more left to go,
 814                 * this is where we let go so that a subsequent
 815                 * "kick" can start the next iteration
 816                 */
 817
 818                /* will make __queue_kicked_iocb succeed from here on */
 819                INIT_LIST_HEAD(&iocb->ki_run_list);
 820                /* we must queue the next iteration ourselves, if it
 821                 * has already been kicked */
 822                if (kiocbIsKicked(iocb)) {
 823                        __queue_kicked_iocb(iocb);
 824
 825                        /*
 826                         * __queue_kicked_iocb will always return 1 here, because
 827                         * iocb->ki_run_list is empty at this point so it should
 828                         * be safe to unconditionally queue the context into the
 829                         * work queue.
 830                         */
 831                        aio_queue_work(ctx);
 832                }
 833        }
 834        return ret;
 835}
 836
 837/*
 838 * __aio_run_iocbs:
 839 *      Process all pending retries queued on the ioctx
 840 *      run list.
 841 * Assumes it is operating within the aio issuer's mm
 842 * context.
 843 */
 844static int __aio_run_iocbs(struct kioctx *ctx)
 845{
 846        struct kiocb *iocb;
 847        struct list_head run_list;
 848
 849        assert_spin_locked(&ctx->ctx_lock);
 850
 851        list_replace_init(&ctx->run_list, &run_list);
 852        while (!list_empty(&run_list)) {
 853                iocb = list_entry(run_list.next, struct kiocb,
 854                        ki_run_list);
 855                list_del(&iocb->ki_run_list);
 856                /*
 857                 * Hold an extra reference while retrying i/o.
 858                 */
 859                iocb->ki_users++;       /* grab extra reference */
 860                aio_run_iocb(iocb);
 861                __aio_put_req(ctx, iocb);
 862        }
 863        if (!list_empty(&ctx->run_list))
 864                return 1;
 865        return 0;
 866}
 867
 868static void aio_queue_work(struct kioctx * ctx)
 869{
 870        unsigned long timeout;
 871        /*
 872         * if someone is waiting, get the work started right
 873         * away, otherwise, use a longer delay
 874         */
 875        smp_mb();
 876        if (waitqueue_active(&ctx->wait))
 877                timeout = 1;
 878        else
 879                timeout = HZ/10;
 880        queue_delayed_work(aio_wq, &ctx->wq, timeout);
 881}
 882
 883/*
 884 * aio_run_all_iocbs:
 885 *      Process all pending retries queued on the ioctx
 886 *      run list, and keep running them until the list
 887 *      stays empty.
 888 * Assumes it is operating within the aio issuer's mm context.
 889 */
 890static inline void aio_run_all_iocbs(struct kioctx *ctx)
 891{
 892        spin_lock_irq(&ctx->ctx_lock);
 893        while (__aio_run_iocbs(ctx))
 894                ;
 895        spin_unlock_irq(&ctx->ctx_lock);
 896}
 897
 898/*
 899 * aio_kick_handler:
 900 *      Work queue handler triggered to process pending
 901 *      retries on an ioctx. Takes on the aio issuer's
 902 *      mm context before running the iocbs, so that
 903 *      copy_xxx_user operates on the issuer's address
 904 *      space.
 905 * Run on aiod's context.
 906 */
 907static void aio_kick_handler(struct work_struct *work)
 908{
 909        struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 910        mm_segment_t oldfs = get_fs();
 911        struct mm_struct *mm;
 912        int requeue;
 913
 914        set_fs(USER_DS);
 915        use_mm(ctx->mm);
 916        spin_lock_irq(&ctx->ctx_lock);
  917        requeue = __aio_run_iocbs(ctx);
 918        mm = ctx->mm;
 919        spin_unlock_irq(&ctx->ctx_lock);
 920        unuse_mm(mm);
 921        set_fs(oldfs);
 922        /*
 923         * we're in a worker thread already, don't use queue_delayed_work,
 924         */
 925        if (requeue)
 926                queue_delayed_work(aio_wq, &ctx->wq, 0);
 927}
 928
 929
 930/*
 931 * Called by kick_iocb to queue the kiocb for retry
 932 * and if required activate the aio work queue to process
 933 * it
 934 */
 935static void try_queue_kicked_iocb(struct kiocb *iocb)
 936{
 937        struct kioctx   *ctx = iocb->ki_ctx;
 938        unsigned long flags;
 939        int run = 0;
 940
 941        spin_lock_irqsave(&ctx->ctx_lock, flags);
 942        /* set this inside the lock so that we can't race with aio_run_iocb()
 943         * testing it and putting the iocb on the run list under the lock */
 944        if (!kiocbTryKick(iocb))
 945                run = __queue_kicked_iocb(iocb);
 946        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 947        if (run)
 948                aio_queue_work(ctx);
 949}
 950
 951/*
 952 * kick_iocb:
 953 *      Called typically from a wait queue callback context
 954 *      to trigger a retry of the iocb.
 955 *      The retry is usually executed by aio workqueue
 956 *      threads (See aio_kick_handler).
 957 */
 958void kick_iocb(struct kiocb *iocb)
 959{
 960        /* sync iocbs are easy: they can only ever be executing from a 
 961         * single context. */
 962        if (is_sync_kiocb(iocb)) {
 963                kiocbSetKicked(iocb);
 964                wake_up_process(iocb->ki_obj.tsk);
 965                return;
 966        }
 967
 968        try_queue_kicked_iocb(iocb);
 969}
 970EXPORT_SYMBOL(kick_iocb);
 971
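/*
 * Hypothetical sketch (no such driver exists in this file) of the usual
 * caller: a retry-based ->aio_read stashes its kiocb in a wait queue entry,
 * returns -EIOCBRETRY, and this wake function later kicks the iocb so the
 * aio workqueue re-runs ki_retry once data has arrived.
 */
static int kick_on_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct kiocb *iocb = wait->private;     /* stored at submission time */

        kick_iocb(iocb);                        /* queue a retry via aio_wq */
        return 1;                               /* wakeup handled */
}
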
 972/* aio_complete
 973 *      Called when the io request on the given iocb is complete.
 974 *      Returns true if this is the last user of the request.  The 
 975 *      only other user of the request can be the cancellation code.
 976 */
 977int aio_complete(struct kiocb *iocb, long res, long res2)
 978{
 979        struct kioctx   *ctx = iocb->ki_ctx;
 980        struct aio_ring_info    *info;
 981        struct aio_ring *ring;
 982        struct io_event *event;
 983        unsigned long   flags;
 984        unsigned long   tail;
 985        int             ret;
 986
 987        /*
 988         * Special case handling for sync iocbs:
 989         *  - events go directly into the iocb for fast handling
 990         *  - the sync task with the iocb in its stack holds the single iocb
 991         *    ref, no other paths have a way to get another ref
 992         *  - the sync task helpfully left a reference to itself in the iocb
 993         */
 994        if (is_sync_kiocb(iocb)) {
 995                BUG_ON(iocb->ki_users != 1);
 996                iocb->ki_user_data = res;
 997                iocb->ki_users = 0;
 998                wake_up_process(iocb->ki_obj.tsk);
 999                return 1;
1000        }
1001
1002        info = &ctx->ring_info;
1003
1004        /* add a completion event to the ring buffer.
1005         * must be done holding ctx->ctx_lock to prevent
1006         * other code from messing with the tail
1007         * pointer since we might be called from irq
1008         * context.
1009         */
1010        spin_lock_irqsave(&ctx->ctx_lock, flags);
1011
1012        if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
1013                list_del_init(&iocb->ki_run_list);
1014
1015        /*
1016         * cancelled requests don't get events, userland was given one
1017         * when the event got cancelled.
1018         */
1019        if (kiocbIsCancelled(iocb))
1020                goto put_rq;
1021
1022        ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
1023
1024        tail = info->tail;
1025        event = aio_ring_event(info, tail, KM_IRQ0);
1026        if (++tail >= info->nr)
1027                tail = 0;
1028
1029        event->obj = (u64)(unsigned long)iocb->ki_obj.user;
1030        event->data = iocb->ki_user_data;
1031        event->res = res;
1032        event->res2 = res2;
1033
1034        dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
1035                ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
1036                res, res2);
1037
1038        /* after flagging the request as done, we
1039         * must never even look at it again
1040         */
1041        smp_wmb();      /* make event visible before updating tail */
1042
1043        info->tail = tail;
1044        ring->tail = tail;
1045
1046        put_aio_ring_event(event, KM_IRQ0);
1047        kunmap_atomic(ring, KM_IRQ1);
1048
1049        pr_debug("added to ring %p at [%lu]\n", iocb, tail);
1050
1051        /*
1052         * Check if the user asked us to deliver the result through an
1053         * eventfd. The eventfd_signal() function is safe to be called
1054         * from IRQ context.
1055         */
1056        if (iocb->ki_eventfd != NULL)
1057                eventfd_signal(iocb->ki_eventfd, 1);
1058
1059put_rq:
1060        /* everything turned out well, dispose of the aiocb. */
1061        ret = __aio_put_req(ctx, iocb);
1062
1063        /*
1064         * We have to order our ring_info tail store above and test
1065         * of the wait list below outside the wait lock.  This is
1066         * like in wake_up_bit() where clearing a bit has to be
1067         * ordered with the unlocked test.
1068         */
1069        smp_mb();
1070
1071        if (waitqueue_active(&ctx->wait))
1072                wake_up(&ctx->wait);
1073
1074        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1075        return ret;
1076}
1077EXPORT_SYMBOL(aio_complete);
1078
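/*
 * Hypothetical completion-side sketch (types and names are illustrative): a
 * driver whose ->aio_read queued hardware I/O and returned -EIOCBQUEUED
 * calls aio_complete() from its completion handler to post the io_event
 * built above and drop the submission path's reference on the iocb.
 */
struct my_async_req {                   /* illustrative driver type */
        struct kiocb *iocb;
        long bytes_done;
};

static void my_hw_irq_done(struct my_async_req *rq)
{
        /* safe from irq context: aio_complete() takes ctx_lock irqsave */
        aio_complete(rq->iocb, rq->bytes_done, 0);
}
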
1079/* aio_read_evt
1080 *      Pull an event off of the ioctx's event ring.  Returns the number of 
1081 *      events fetched (0 or 1 ;-)
1082 *      FIXME: make this use cmpxchg.
1083 *      TODO: make the ringbuffer user mmap()able (requires FIXME).
1084 */
1085static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
1086{
1087        struct aio_ring_info *info = &ioctx->ring_info;
1088        struct aio_ring *ring;
1089        unsigned long head;
1090        int ret = 0;
1091
1092        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
1093        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
1094                 (unsigned long)ring->head, (unsigned long)ring->tail,
1095                 (unsigned long)ring->nr);
1096
1097        if (ring->head == ring->tail)
1098                goto out;
1099
1100        spin_lock(&info->ring_lock);
1101
1102        head = ring->head % info->nr;
1103        if (head != ring->tail) {
1104                struct io_event *evp = aio_ring_event(info, head, KM_USER1);
1105                *ent = *evp;
1106                head = (head + 1) % info->nr;
 1107                smp_mb(); /* finish reading the event before updating the head */
1108                ring->head = head;
1109                ret = 1;
1110                put_aio_ring_event(evp, KM_USER1);
1111        }
1112        spin_unlock(&info->ring_lock);
1113
1114out:
1115        kunmap_atomic(ring, KM_USER0);
1116        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
1117                 (unsigned long)ring->head, (unsigned long)ring->tail);
1118        return ret;
1119}
1120
1121struct aio_timeout {
1122        struct timer_list       timer;
1123        int                     timed_out;
1124        struct task_struct      *p;
1125};
1126
1127static void timeout_func(unsigned long data)
1128{
1129        struct aio_timeout *to = (struct aio_timeout *)data;
1130
1131        to->timed_out = 1;
1132        wake_up_process(to->p);
1133}
1134
1135static inline void init_timeout(struct aio_timeout *to)
1136{
1137        setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
1138        to->timed_out = 0;
1139        to->p = current;
1140}
1141
1142static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
1143                               const struct timespec *ts)
1144{
1145        to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
1146        if (time_after(to->timer.expires, jiffies))
1147                add_timer(&to->timer);
1148        else
1149                to->timed_out = 1;
1150}
1151
1152static inline void clear_timeout(struct aio_timeout *to)
1153{
1154        del_singleshot_timer_sync(&to->timer);
1155}
1156
1157static int read_events(struct kioctx *ctx,
1158                        long min_nr, long nr,
1159                        struct io_event __user *event,
1160                        struct timespec __user *timeout)
1161{
1162        long                    start_jiffies = jiffies;
1163        struct task_struct      *tsk = current;
1164        DECLARE_WAITQUEUE(wait, tsk);
1165        int                     ret;
1166        int                     i = 0;
1167        struct io_event         ent;
1168        struct aio_timeout      to;
1169        int                     retry = 0;
1170
1171        /* needed to zero any padding within an entry (there shouldn't be 
 1172         * any, but C is fun!)
1173         */
1174        memset(&ent, 0, sizeof(ent));
1175retry:
1176        ret = 0;
1177        while (likely(i < nr)) {
1178                ret = aio_read_evt(ctx, &ent);
1179                if (unlikely(ret <= 0))
1180                        break;
1181
1182                dprintk("read event: %Lx %Lx %Lx %Lx\n",
1183                        ent.data, ent.obj, ent.res, ent.res2);
1184
1185                /* Could we split the check in two? */
1186                ret = -EFAULT;
1187                if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
1188                        dprintk("aio: lost an event due to EFAULT.\n");
1189                        break;
1190                }
1191                ret = 0;
1192
1193                /* Good, event copied to userland, update counts. */
1194                event ++;
1195                i ++;
1196        }
1197
1198        if (min_nr <= i)
1199                return i;
1200        if (ret)
1201                return ret;
1202
1203        /* End fast path */
1204
 1205        /* racy check, but it gets redone */
1206        if (!retry && unlikely(!list_empty(&ctx->run_list))) {
1207                retry = 1;
1208                aio_run_all_iocbs(ctx);
1209                goto retry;
1210        }
1211
1212        init_timeout(&to);
1213        if (timeout) {
1214                struct timespec ts;
1215                ret = -EFAULT;
1216                if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
1217                        goto out;
1218
1219                set_timeout(start_jiffies, &to, &ts);
1220        }
1221
1222        while (likely(i < nr)) {
1223                add_wait_queue_exclusive(&ctx->wait, &wait);
1224                do {
1225                        set_task_state(tsk, TASK_INTERRUPTIBLE);
1226                        ret = aio_read_evt(ctx, &ent);
1227                        if (ret)
1228                                break;
1229                        if (min_nr <= i)
1230                                break;
1231                        if (unlikely(ctx->dead)) {
1232                                ret = -EINVAL;
1233                                break;
1234                        }
1235                        if (to.timed_out)       /* Only check after read evt */
1236                                break;
1237                        /* Try to only show up in io wait if there are ops
1238                         *  in flight */
1239                        if (ctx->reqs_active)
1240                                io_schedule();
1241                        else
1242                                schedule();
1243                        if (signal_pending(tsk)) {
1244                                ret = -EINTR;
1245                                break;
1246                        }
1247                        /*ret = aio_read_evt(ctx, &ent);*/
1248                } while (1) ;
1249
1250                set_task_state(tsk, TASK_RUNNING);
1251                remove_wait_queue(&ctx->wait, &wait);
1252
1253                if (unlikely(ret <= 0))
1254                        break;
1255
1256                ret = -EFAULT;
1257                if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
1258                        dprintk("aio: lost an event due to EFAULT.\n");
1259                        break;
1260                }
1261
1262                /* Good, event copied to userland, update counts. */
1263                event ++;
1264                i ++;
1265        }
1266
1267        if (timeout)
1268                clear_timeout(&to);
1269out:
1270        destroy_timer_on_stack(&to.timer);
1271        return i ? i : ret;
1272}
1273
1274/* Take an ioctx and remove it from the list of ioctx's.  Protects 
1275 * against races with itself via ->dead.
1276 */
1277static void io_destroy(struct kioctx *ioctx)
1278{
1279        struct mm_struct *mm = current->mm;
1280        int was_dead;
1281
 1282        /* delete the entry from the list if someone else hasn't already */
1283        spin_lock(&mm->ioctx_lock);
1284        was_dead = ioctx->dead;
1285        ioctx->dead = 1;
1286        hlist_del_rcu(&ioctx->list);
1287        spin_unlock(&mm->ioctx_lock);
1288
1289        dprintk("aio_release(%p)\n", ioctx);
1290        if (likely(!was_dead))
1291                put_ioctx(ioctx);       /* twice for the list */
1292
1293        aio_cancel_all(ioctx);
1294        wait_for_all_aios(ioctx);
1295
1296        /*
1297         * Wake up any waiters.  The setting of ctx->dead must be seen
1298         * by other CPUs at this point.  Right now, we rely on the
1299         * locking done by the above calls to ensure this consistency.
1300         */
1301        wake_up_all(&ioctx->wait);
1302        put_ioctx(ioctx);       /* once for the lookup */
1303}
1304
1305/* sys_io_setup:
1306 *      Create an aio_context capable of receiving at least nr_events.
1307 *      ctxp must not point to an aio_context that already exists, and
1308 *      must be initialized to 0 prior to the call.  On successful
1309 *      creation of the aio_context, *ctxp is filled in with the resulting 
1310 *      handle.  May fail with -EINVAL if *ctxp is not initialized,
 1311 *      or if the specified nr_events exceeds internal limits.  May fail 
1312 *      with -EAGAIN if the specified nr_events exceeds the user's limit 
1313 *      of available events.  May fail with -ENOMEM if insufficient kernel
1314 *      resources are available.  May fail with -EFAULT if an invalid
1315 *      pointer is passed for ctxp.  Will fail with -ENOSYS if not
1316 *      implemented.
1317 */
1318SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1319{
1320        struct kioctx *ioctx = NULL;
1321        unsigned long ctx;
1322        long ret;
1323
1324        ret = get_user(ctx, ctxp);
1325        if (unlikely(ret))
1326                goto out;
1327
1328        ret = -EINVAL;
1329        if (unlikely(ctx || nr_events == 0)) {
1330                pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
1331                         ctx, nr_events);
1332                goto out;
1333        }
1334
1335        ioctx = ioctx_alloc(nr_events);
1336        ret = PTR_ERR(ioctx);
1337        if (!IS_ERR(ioctx)) {
1338                ret = put_user(ioctx->user_id, ctxp);
1339                if (!ret) {
1340                        put_ioctx(ioctx);
1341                        return 0;
1342                }
1343                io_destroy(ioctx);
1344        }
1345
1346out:
1347        return ret;
1348}
1349
1350/* sys_io_destroy:
1351 *      Destroy the aio_context specified.  May cancel any outstanding 
1352 *      AIOs and block on completion.  Will fail with -ENOSYS if not
1353 *      implemented.  May fail with -EINVAL if the context pointed to
1354 *      is invalid.
1355 */
1356SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1357{
1358        struct kioctx *ioctx = lookup_ioctx(ctx);
1359        if (likely(NULL != ioctx)) {
1360                io_destroy(ioctx);
1361                return 0;
1362        }
1363        pr_debug("EINVAL: io_destroy: invalid context id\n");
1364        return -EINVAL;
1365}
1366
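/*
 * Userspace sketch (editor's example, not kernel code) exercising the two
 * syscalls above plus io_submit()/io_getevents(): the context word must be
 * zero before io_setup(), and the raw syscall interface is used because
 * glibc does not wrap these calls.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        aio_context_t ctx = 0;                  /* must start out zeroed */
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        char buf[4096];
        int fd = open("/etc/hostname", O_RDONLY);

        if (fd < 0 || syscall(__NR_io_setup, 128, &ctx) < 0)
                return 1;

        memset(&cb, 0, sizeof(cb));             /* reserved fields must be 0 */
        cb.aio_lio_opcode = IOCB_CMD_PREAD;
        cb.aio_fildes = fd;
        cb.aio_buf = (unsigned long)buf;
        cb.aio_nbytes = sizeof(buf);

        if (syscall(__NR_io_submit, ctx, 1, cbs) == 1 &&
            syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
                printf("read %lld bytes\n", (long long)ev.res);

        syscall(__NR_io_destroy, ctx);
        close(fd);
        return 0;
}
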
1367static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
1368{
1369        struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
1370
1371        BUG_ON(ret <= 0);
1372
1373        while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
1374                ssize_t this = min((ssize_t)iov->iov_len, ret);
1375                iov->iov_base += this;
1376                iov->iov_len -= this;
1377                iocb->ki_left -= this;
1378                ret -= this;
1379                if (iov->iov_len == 0) {
1380                        iocb->ki_cur_seg++;
1381                        iov++;
1382                }
1383        }
1384
1385        /* the caller should not have done more io than what fit in
1386         * the remaining iovecs */
1387        BUG_ON(ret > 0 && iocb->ki_left == 0);
1388}
1389
1390static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
1391{
1392        struct file *file = iocb->ki_filp;
1393        struct address_space *mapping = file->f_mapping;
1394        struct inode *inode = mapping->host;
1395        ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
1396                         unsigned long, loff_t);
1397        ssize_t ret = 0;
1398        unsigned short opcode;
1399
1400        if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
1401                (iocb->ki_opcode == IOCB_CMD_PREAD)) {
1402                rw_op = file->f_op->aio_read;
1403                opcode = IOCB_CMD_PREADV;
1404        } else {
1405                rw_op = file->f_op->aio_write;
1406                opcode = IOCB_CMD_PWRITEV;
1407        }
1408
1409        /* This matches the pread()/pwrite() logic */
1410        if (iocb->ki_pos < 0)
1411                return -EINVAL;
1412
1413        do {
1414                ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
1415                            iocb->ki_nr_segs - iocb->ki_cur_seg,
1416                            iocb->ki_pos);
1417                if (ret > 0)
1418                        aio_advance_iovec(iocb, ret);
1419
1420        /* retry all partial writes.  retry partial reads as long as its a
1421         * regular file. */
1422        } while (ret > 0 && iocb->ki_left > 0 &&
1423                 (opcode == IOCB_CMD_PWRITEV ||
1424                  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
1425
1426        /* This means we must have transferred all that we could */
1427        /* No need to retry anymore */
1428        if ((ret == 0) || (iocb->ki_left == 0))
1429                ret = iocb->ki_nbytes - iocb->ki_left;
1430
1431        /* If we managed to write some out we return that, rather than
1432         * the eventual error. */
1433        if (opcode == IOCB_CMD_PWRITEV
1434            && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
1435            && iocb->ki_nbytes - iocb->ki_left)
1436                ret = iocb->ki_nbytes - iocb->ki_left;
1437
1438        return ret;
1439}
1440
1441static ssize_t aio_fdsync(struct kiocb *iocb)
1442{
1443        struct file *file = iocb->ki_filp;
1444        ssize_t ret = -EINVAL;
1445
1446        if (file->f_op->aio_fsync)
1447                ret = file->f_op->aio_fsync(iocb, 1);
1448        return ret;
1449}
1450
1451static ssize_t aio_fsync(struct kiocb *iocb)
1452{
1453        struct file *file = iocb->ki_filp;
1454        ssize_t ret = -EINVAL;
1455
1456        if (file->f_op->aio_fsync)
1457                ret = file->f_op->aio_fsync(iocb, 0);
1458        return ret;
1459}
1460
1461static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
1462{
1463        ssize_t ret;
1464
1465#ifdef CONFIG_COMPAT
1466        if (compat)
1467                ret = compat_rw_copy_check_uvector(type,
1468                                (struct compat_iovec __user *)kiocb->ki_buf,
1469                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1470                                &kiocb->ki_iovec, 1);
1471        else
1472#endif
1473                ret = rw_copy_check_uvector(type,
1474                                (struct iovec __user *)kiocb->ki_buf,
1475                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1476                                &kiocb->ki_iovec, 1);
1477        if (ret < 0)
1478                goto out;
1479
1480        kiocb->ki_nr_segs = kiocb->ki_nbytes;
1481        kiocb->ki_cur_seg = 0;
1482        /* ki_nbytes/left now reflect bytes instead of segs */
1483        kiocb->ki_nbytes = ret;
1484        kiocb->ki_left = ret;
1485
1486        ret = 0;
1487out:
1488        return ret;
1489}
1490
1491static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
1492{
1493        kiocb->ki_iovec = &kiocb->ki_inline_vec;
1494        kiocb->ki_iovec->iov_base = kiocb->ki_buf;
1495        kiocb->ki_iovec->iov_len = kiocb->ki_left;
1496        kiocb->ki_nr_segs = 1;
1497        kiocb->ki_cur_seg = 0;
1498        return 0;
1499}
1500
1501/*
1502 * aio_setup_iocb:
1503 *      Performs the initial checks and aio retry method
1504 *      setup for the kiocb at the time of io submission.
1505 */
1506static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
1507{
1508        struct file *file = kiocb->ki_filp;
1509        ssize_t ret = 0;
1510
1511        switch (kiocb->ki_opcode) {
1512        case IOCB_CMD_PREAD:
1513                ret = -EBADF;
1514                if (unlikely(!(file->f_mode & FMODE_READ)))
1515                        break;
1516                ret = -EFAULT;
1517                if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
1518                        kiocb->ki_left)))
1519                        break;
1520                ret = security_file_permission(file, MAY_READ);
1521                if (unlikely(ret))
1522                        break;
1523                ret = aio_setup_single_vector(kiocb);
1524                if (ret)
1525                        break;
1526                ret = -EINVAL;
1527                if (file->f_op->aio_read)
1528                        kiocb->ki_retry = aio_rw_vect_retry;
1529                break;
1530        case IOCB_CMD_PWRITE:
1531                ret = -EBADF;
1532                if (unlikely(!(file->f_mode & FMODE_WRITE)))
1533                        break;
1534                ret = -EFAULT;
1535                if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
1536                        kiocb->ki_left)))
1537                        break;
1538                ret = security_file_permission(file, MAY_WRITE);
1539                if (unlikely(ret))
1540                        break;
1541                ret = aio_setup_single_vector(kiocb);
1542                if (ret)
1543                        break;
1544                ret = -EINVAL;
1545                if (file->f_op->aio_write)
1546                        kiocb->ki_retry = aio_rw_vect_retry;
1547                break;
1548        case IOCB_CMD_PREADV:
1549                ret = -EBADF;
1550                if (unlikely(!(file->f_mode & FMODE_READ)))
1551                        break;
1552                ret = security_file_permission(file, MAY_READ);
1553                if (unlikely(ret))
1554                        break;
1555                ret = aio_setup_vectored_rw(READ, kiocb, compat);
1556                if (ret)
1557                        break;
1558                ret = -EINVAL;
1559                if (file->f_op->aio_read)
1560                        kiocb->ki_retry = aio_rw_vect_retry;
1561                break;
1562        case IOCB_CMD_PWRITEV:
1563                ret = -EBADF;
1564                if (unlikely(!(file->f_mode & FMODE_WRITE)))
1565                        break;
1566                ret = security_file_permission(file, MAY_WRITE);
1567                if (unlikely(ret))
1568                        break;
1569                ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
1570                if (ret)
1571                        break;
1572                ret = -EINVAL;
1573                if (file->f_op->aio_write)
1574                        kiocb->ki_retry = aio_rw_vect_retry;
1575                break;
1576        case IOCB_CMD_FDSYNC:
1577                ret = -EINVAL;
1578                if (file->f_op->aio_fsync)
1579                        kiocb->ki_retry = aio_fdsync;
1580                break;
1581        case IOCB_CMD_FSYNC:
1582                ret = -EINVAL;
1583                if (file->f_op->aio_fsync)
1584                        kiocb->ki_retry = aio_fsync;
1585                break;
1586        default:
1587                dprintk("EINVAL: io_submit: invalid operation provided\n");
1588                ret = -EINVAL;
1589        }
1590
1591        if (!kiocb->ki_retry)
1592                return ret;
1593
1594        return 0;
1595}
1596
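/* io_submit_one
 *      Validates one iocb copied in from userspace, pins the target file,
 *      allocates and sets up the kiocb (optionally wiring up an eventfd
 *      via IOCB_FLAG_RESFD), runs the first retry under ctx_lock and drops
 *      the submitter's extra reference.  Returns 0 on success or a -errno.
 */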
1597static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1598                         struct iocb *iocb, struct kiocb_batch *batch,
1599                         bool compat)
1600{
1601        struct kiocb *req;
1602        struct file *file;
1603        ssize_t ret;
1604
1605        /* enforce forwards compatibility on users */
1606        if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
1607                pr_debug("EINVAL: io_submit: reserve field set\n");
1608                return -EINVAL;
1609        }
1610
1611        /* prevent overflows */
1612        if (unlikely(
1613            (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
1614            (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
1615            ((ssize_t)iocb->aio_nbytes < 0)
1616           )) {
1617                pr_debug("EINVAL: io_submit: overflow check\n");
1618                return -EINVAL;
1619        }
1620
1621        file = fget(iocb->aio_fildes);
1622        if (unlikely(!file))
1623                return -EBADF;
1624
1625        req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
1626        if (unlikely(!req)) {
1627                fput(file);
1628                return -EAGAIN;
1629        }
1630        req->ki_filp = file;
1631        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1632                /*
1633                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1634                 * instance of the file* now. The file descriptor must be
1635                 * an eventfd() fd, and will be signaled for each completed
1636                 * event using the eventfd_signal() function.
1637                 */
1638                req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
1639                if (IS_ERR(req->ki_eventfd)) {
1640                        ret = PTR_ERR(req->ki_eventfd);
1641                        req->ki_eventfd = NULL;
1642                        goto out_put_req;
1643                }
1644        }
1645
1646        ret = put_user(req->ki_key, &user_iocb->aio_key);
1647        if (unlikely(ret)) {
1648                dprintk("EFAULT: aio_key\n");
1649                goto out_put_req;
1650        }
1651
1652        req->ki_obj.user = user_iocb;
1653        req->ki_user_data = iocb->aio_data;
1654        req->ki_pos = iocb->aio_offset;
1655
1656        req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
1657        req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1658        req->ki_opcode = iocb->aio_lio_opcode;
1659
1660        ret = aio_setup_iocb(req, compat);
1661
1662        if (ret)
1663                goto out_put_req;
1664
1665        spin_lock_irq(&ctx->ctx_lock);
1666        /*
1667         * We could have raced with io_destroy() and are currently holding a
1668         * reference to ctx which should be destroyed. We cannot submit IO
1669         * since ctx gets freed as soon as io_submit() puts its reference.  The
1670         * check here is reliable: io_destroy() sets ctx->dead before waiting
1671         * for outstanding IO and the barrier between these two is realized by
1672         * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
1673         * increment ctx->reqs_active before checking for ctx->dead and the
1674         * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
1675         * don't see ctx->dead set here, io_destroy() waits for our IO to
1676         * finish.
1677         */
1678        if (ctx->dead) {
1679                spin_unlock_irq(&ctx->ctx_lock);
1680                ret = -EINVAL;
1681                goto out_put_req;
1682        }
1683        aio_run_iocb(req);
1684        if (!list_empty(&ctx->run_list)) {
1685                /* drain the run list */
1686                while (__aio_run_iocbs(ctx))
1687                        ;
1688        }
1689        spin_unlock_irq(&ctx->ctx_lock);
1690
1691        aio_put_req(req);       /* drop extra ref to req */
1692        return 0;
1693
1694out_put_req:
1695        aio_put_req(req);       /* drop extra ref to req */
1696        aio_put_req(req);       /* drop i/o ref to req */
1697        return ret;
1698}
1699
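/*
 * A minimal userspace sketch of how the struct iocb fields consumed by
 * io_submit_one() above might be filled; fill_pread, fd, buf and efd are
 * hypothetical names, and efd is assumed to be an eventfd(2) descriptor
 * so completions are signalled as described for IOCB_FLAG_RESFD.
 *
 *	#include <string.h>
 *	#include <linux/aio_abi.h>
 *
 *	static void fill_pread(struct iocb *cb, int fd, void *buf,
 *			       size_t len, long long off, int efd)
 *	{
 *		memset(cb, 0, sizeof(*cb));
 *		cb->aio_data       = (__u64)(unsigned long)cb;  // returned in io_event.data
 *		cb->aio_lio_opcode = IOCB_CMD_PREAD;            // dispatched by aio_setup_iocb()
 *		cb->aio_fildes     = fd;
 *		cb->aio_buf        = (__u64)(unsigned long)buf; // becomes ki_buf
 *		cb->aio_nbytes     = len;                       // becomes ki_nbytes/ki_left
 *		cb->aio_offset     = off;                       // becomes ki_pos
 *		cb->aio_flags      = IOCB_FLAG_RESFD;           // optional completion eventfd
 *		cb->aio_resfd      = efd;
 *	}
 */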
1700long do_io_submit(aio_context_t ctx_id, long nr,
1701                  struct iocb __user * __user *iocbpp, bool compat)
1702{
1703        struct kioctx *ctx;
1704        long ret = 0;
1705        int i = 0;
1706        struct blk_plug plug;
1707        struct kiocb_batch batch;
1708
1709        if (unlikely(nr < 0))
1710                return -EINVAL;
1711
1712        if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
1713                nr = LONG_MAX/sizeof(*iocbpp);
1714
1715        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
1716                return -EFAULT;
1717
1718        ctx = lookup_ioctx(ctx_id);
1719        if (unlikely(!ctx)) {
1720                pr_debug("EINVAL: io_submit: invalid context id\n");
1721                return -EINVAL;
1722        }
1723
1724        kiocb_batch_init(&batch, nr);
1725
1726        blk_start_plug(&plug);
1727
1728        /*
1729         * AKPM: should this return a partial result if some of the IOs were
1730         * successfully submitted?
1731         */
1732        for (i = 0; i < nr; i++) {
1733                struct iocb __user *user_iocb;
1734                struct iocb tmp;
1735
1736                if (unlikely(__get_user(user_iocb, iocbpp + i))) {
1737                        ret = -EFAULT;
1738                        break;
1739                }
1740
1741                if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
1742                        ret = -EFAULT;
1743                        break;
1744                }
1745
1746                ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
1747                if (ret)
1748                        break;
1749        }
1750        blk_finish_plug(&plug);
1751
1752        kiocb_batch_free(ctx, &batch);
1753        put_ioctx(ctx);
1754        return i ? i : ret;
1755}
1756
1757/* sys_io_submit:
1758 *      Queue the nr iocbs pointed to by iocbpp for processing.  Returns
1759 *      the number of iocbs queued.  May return -EINVAL if the aio_context
1760 *      specified by ctx_id is invalid, if nr is < 0, if the iocb at
1761 *      *iocbpp[0] is not properly initialized, or if the operation specified
1762 *      is invalid for the file descriptor in the iocb.  May fail with
1763 *      -EFAULT if any of the data structures point to invalid data.  May
1764 *      fail with -EBADF if the file descriptor specified in the first
1765 *      iocb is invalid.  May fail with -EAGAIN if insufficient resources
1766 *      are available to queue any iocbs.  Will return 0 if nr is 0.  Will
1767 *      fail with -ENOSYS if not implemented.
1768 */
1769SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1770                struct iocb __user * __user *, iocbpp)
1771{
1772        return do_io_submit(ctx_id, nr, iocbpp, 0);
1773}
1774
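/*
 * A minimal userspace sketch of the submit path documented above, assuming
 * the raw syscalls are reached via syscall(2) (glibc provides no wrappers)
 * and reusing the hypothetical fill_pread() helper sketched earlier; fd,
 * buf and efd are likewise hypothetical.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	static int submit_one_read(aio_context_t *ctx, int fd, void *buf,
 *				   size_t len, int efd)
 *	{
 *		struct iocb cb, *cbs[1] = { &cb };
 *
 *		// create the context on first use (up to 128 in-flight events)
 *		if (*ctx == 0 && syscall(SYS_io_setup, 128, ctx) < 0)
 *			return -1;
 *		fill_pread(&cb, fd, buf, len, 0, efd);
 *		// io_submit() returns the number of iocbs queued (here 0 or 1)
 *		return syscall(SYS_io_submit, *ctx, 1, cbs) == 1 ? 0 : -1;
 *	}
 */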
1775/* lookup_kiocb
1776 *      Finds a given iocb for cancellation.
1777 */
1778static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
1779                                  u32 key)
1780{
1781        struct list_head *pos;
1782
1783        assert_spin_locked(&ctx->ctx_lock);
1784
1785        /* TODO: use a hash or array; this linear scan is slow. */
1786        list_for_each(pos, &ctx->active_reqs) {
1787                struct kiocb *kiocb = list_kiocb(pos);
1788                if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
1789                        return kiocb;
1790        }
1791        return NULL;
1792}
1793
1794/* sys_io_cancel:
1795 *      Attempts to cancel an iocb previously passed to io_submit.  If
1796 *      the operation is successfully cancelled, the resulting event is
1797 *      copied into the memory pointed to by result without being placed
1798 *      into the completion queue and 0 is returned.  May fail with
1799 *      -EFAULT if any of the data structures pointed to are invalid.
1800 *      May fail with -EINVAL if aio_context specified by ctx_id is
1801 *      invalid.  May fail with -EAGAIN if the iocb specified was not
1802 *      cancelled.  Will fail with -ENOSYS if not implemented.
1803 */
1804SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1805                struct io_event __user *, result)
1806{
1807        int (*cancel)(struct kiocb *iocb, struct io_event *res);
1808        struct kioctx *ctx;
1809        struct kiocb *kiocb;
1810        u32 key;
1811        int ret;
1812
1813        ret = get_user(key, &iocb->aio_key);
1814        if (unlikely(ret))
1815                return -EFAULT;
1816
1817        ctx = lookup_ioctx(ctx_id);
1818        if (unlikely(!ctx))
1819                return -EINVAL;
1820
1821        spin_lock_irq(&ctx->ctx_lock);
1822        ret = -EAGAIN;
1823        kiocb = lookup_kiocb(ctx, iocb, key);
1824        if (kiocb && kiocb->ki_cancel) {
1825                cancel = kiocb->ki_cancel;
1826                kiocb->ki_users++;
1827                kiocbSetCancelled(kiocb);
1828        } else
1829                cancel = NULL;
1830        spin_unlock_irq(&ctx->ctx_lock);
1831
1832        if (cancel) {
1833                struct io_event tmp;
1834                pr_debug("calling cancel\n");
1835                memset(&tmp, 0, sizeof(tmp));
1836                tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
1837                tmp.data = kiocb->ki_user_data;
1838                ret = cancel(kiocb, &tmp);
1839                if (!ret) {
1840                        /* Cancellation succeeded -- copy the result
1841                         * into the user's buffer.
1842                         */
1843                        if (copy_to_user(result, &tmp, sizeof(tmp)))
1844                                ret = -EFAULT;
1845                }
1846        } else
1847                ret = -EINVAL;
1848
1849        put_ioctx(ctx);
1850
1851        return ret;
1852}
1853
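/*
 * A minimal userspace sketch of the cancel path documented above, assuming
 * ctx is the aio_context_t created earlier and cb is the submitted iocb:
 *
 *	struct io_event ev;
 *
 *	if (syscall(SYS_io_cancel, ctx, &cb, &ev) == 0) {
 *		// cancelled: ev holds the result and was not queued on the ring
 *	} else {
 *		// typically errno == EAGAIN: the request could not be cancelled
 *		// and its completion will still arrive via io_getevents()
 *	}
 */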
1854/* io_getevents:
1855 *      Attempts to read at least min_nr events and up to nr events from
1856 *      the completion queue for the aio_context specified by ctx_id. If
1857 *      it succeeds, the number of read events is returned. May fail with
1858 *      -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
1859 *      out of range, or if timeout is out of range.  May fail with -EFAULT
1860 *      if any of the memory specified is invalid.  May return 0 or
1861 *      < min_nr if the timeout specified by timeout has elapsed
1862 *      before sufficient events are available, where timeout == NULL
1863 *      specifies an infinite timeout. Note that the timeout pointed to by
1864 *      timeout is relative and will be updated if not NULL and the
1865 *      operation blocks. Will fail with -ENOSYS if not implemented.
1866 */
1867SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1868                long, min_nr,
1869                long, nr,
1870                struct io_event __user *, events,
1871                struct timespec __user *, timeout)
1872{
1873        struct kioctx *ioctx = lookup_ioctx(ctx_id);
1874        long ret = -EINVAL;
1875
1876        if (likely(ioctx)) {
1877                if (likely(min_nr <= nr && min_nr >= 0))
1878                        ret = read_events(ioctx, min_nr, nr, events, timeout);
1879                put_ioctx(ioctx);
1880        }
1881
1882        asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
1883        return ret;
1884}
1885
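/*
 * A minimal userspace sketch of reaping completions per the io_getevents
 * semantics documented above; ctx, NEVENTS and handle_completion() are
 * hypothetical, and the relative timeout may be updated if the call blocks.
 *
 *	struct io_event evs[NEVENTS];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	long i, n;
 *
 *	n = syscall(SYS_io_getevents, ctx, 1, NEVENTS, evs, &ts);
 *	for (i = 0; i < n; i++)
 *		handle_completion(&evs[i]);	// evs[i].res is bytes done or -errno
 */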