/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002    Andrew Morton
 *              Initial version
 * 11Sep2002    janetinc@us.ibm.com
 *              added readv/writev support.
 * 29Oct2002    Andrew Morton
 *              rewrote bio_add_page() support.
 * 30Oct2002    pbadari@us.ibm.com
 *              added support for non-aligned IO.
 * 06Nov2002    pbadari@us.ibm.com
 *              added asynchronous IO support.
 * 21Jul2003    nathans@sgi.com
 *              added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache.
 */
#define DIO_PAGES       64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
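
/*
 * Illustrative example, assuming a 512-byte hard sector and a 4096-byte
 * filesystem block (values are hypothetical): blkbits == 9 and
 * blkfactor == 3, so fs block 5 covers dio_blocks 40..47 (5 << 3 through
 * ((5 + 1) << 3) - 1), and dio_block 42 maps back to fs block
 * 42 >> 3 == 5.
 */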

/* dio_state only used in the submission path */

struct dio_submit {
        struct bio *bio;                /* bio under assembly */
        unsigned blkbits;               /* doesn't change */
        unsigned blkfactor;             /* When we're using an alignment which
                                           is finer than the filesystem's soft
                                           blocksize, this specifies how much
                                           finer.  blkfactor=2 means 1/4-block
                                           alignment.  Does not change */
        unsigned start_zero_done;       /* flag: sub-blocksize zeroing has
                                           been performed at the start of a
                                           write */
        int pages_in_io;                /* approximate total IO pages */
        sector_t block_in_file;         /* Current offset into the underlying
                                           file in dio_block units. */
        unsigned blocks_available;      /* At block_in_file.  changes */
        int reap_counter;               /* rate limit reaping */
        sector_t final_block_in_request;/* doesn't change */
        int boundary;                   /* prev block is at a boundary */
        get_block_t *get_block;         /* block mapping function */
        dio_submit_t *submit_io;        /* IO submission function */

        loff_t logical_offset_in_bio;   /* current first logical block in bio */
        sector_t final_block_in_bio;    /* current final block in bio + 1 */
        sector_t next_block_for_io;     /* next block to be put under IO,
                                           in dio_blocks units */

        /*
         * Deferred addition of a page to the dio.  These variables are
         * private to dio_send_cur_page(), submit_page_section() and
         * dio_bio_add_page().
         */
        struct page *cur_page;          /* The page */
        unsigned cur_page_offset;       /* Offset into it, in bytes */
        unsigned cur_page_len;          /* Nr of bytes at cur_page_offset */
        sector_t cur_page_block;        /* Where it starts */
        loff_t cur_page_fs_offset;      /* Offset in file */

        struct iov_iter *iter;
        /*
         * Page queue.  These variables belong to dio_refill_pages() and
         * dio_get_page().
         */
        unsigned head;                  /* next page to process */
        unsigned tail;                  /* last valid page + 1 */
        size_t from, to;
};

/* dio_state communicated between submission path and end_io */
struct dio {
        int flags;                      /* doesn't change */
        int rw;
        blk_qc_t bio_cookie;
        struct block_device *bio_bdev;
        struct inode *inode;
        loff_t i_size;                  /* i_size when submitted */
        dio_iodone_t *end_io;           /* IO completion function */

        void *private;                  /* copy from map_bh.b_private */

        /* BIO completion state */
        spinlock_t bio_lock;            /* protects BIO fields below */
        int page_errors;                /* errno from get_user_pages() */
        int is_async;                   /* is IO async ? */
        bool defer_completion;          /* defer AIO completion to workqueue? */
        bool should_dirty;              /* if pages should be dirtied */
        int io_error;                   /* IO error in completion path */
        unsigned long refcount;         /* direct_io_worker() and bios */
        struct bio *bio_list;           /* singly linked via bi_private */
        struct task_struct *waiter;     /* waiting task (NULL if none) */

        /* AIO related stuff */
        struct kiocb *iocb;             /* kiocb */
        ssize_t result;                 /* IO result */

        /*
         * pages[] (and any fields placed after it) are not zeroed out at
         * allocation time.  Don't add new fields after pages[] unless you
         * wish that they not be zeroed.
         */
        union {
                struct page *pages[DIO_PAGES];  /* page buffer */
                struct work_struct complete_work;/* deferred AIO completion */
        };
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
        return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
        ssize_t ret;

        ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
                                &sdio->from);

        if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
                struct page *page = ZERO_PAGE(0);
                /*
                 * A memory fault, but the filesystem has some outstanding
                 * mapped blocks.  We need to use those blocks up to avoid
                 * leaking stale data in the file.
                 */
                if (dio->page_errors == 0)
                        dio->page_errors = ret;
                get_page(page);
                dio->pages[0] = page;
                sdio->head = 0;
                sdio->tail = 1;
                sdio->from = 0;
                sdio->to = PAGE_SIZE;
                return 0;
        }

        if (ret >= 0) {
                iov_iter_advance(sdio->iter, ret);
                ret += sdio->from;
                sdio->head = 0;
                sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
                sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
                return 0;
        }
        return ret;
}
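
/*
 * Worked example of the bookkeeping above (numbers are hypothetical,
 * assuming 4096-byte pages): if a refill pins three pages with
 * sdio->from == 512 and iov_iter_get_pages() returns 8704 bytes, then
 * ret becomes 9216, giving head == 0, tail == 3 and to == 1024: the
 * first page is used from byte 512 onwards and the last page up to
 * (but not including) byte 1024.
 */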

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently, which provides nicer use of the
 * L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
                                        struct dio_submit *sdio)
{
        if (dio_pages_present(sdio) == 0) {
                int ret;

                ret = dio_refill_pages(dio, sdio);
                if (ret)
                        return ERR_PTR(ret);
                BUG_ON(dio_pages_present(sdio) == 0);
        }
        return dio->pages[sdio->head];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @offset: the byte offset in the file of the completed operation
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
                bool is_async)
{
        ssize_t transferred = 0;

        /*
         * AIO submission can race with bio completion to get here while
         * expecting to have the last io completed by bio completion.
         * In that case -EIOCBQUEUED is in fact not an error we want
         * to preserve through this call.
         */
        if (ret == -EIOCBQUEUED)
                ret = 0;

        if (dio->result) {
                transferred = dio->result;

                /* Check for short read case */
                if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
                        transferred = dio->i_size - offset;
        }

        if (ret == 0)
                ret = dio->page_errors;
        if (ret == 0)
                ret = dio->io_error;
        if (ret == 0)
                ret = transferred;

        if (dio->end_io) {
                int err;

                err = dio->end_io(dio->iocb, offset, ret, dio->private);
                if (err)
                        ret = err;
        }

        if (!(dio->flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(dio->inode);

        if (is_async) {
                if (dio->rw & WRITE) {
                        int err;

                        err = generic_write_sync(dio->iocb->ki_filp, offset,
                                                 transferred);
                        if (err < 0 && ret > 0)
                                ret = err;
                }

                dio->iocb->ki_complete(dio->iocb, ret, 0);
        }

        kmem_cache_free(dio_cache, dio);
        return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
        struct dio *dio = container_of(work, struct dio, complete_work);

        dio_complete(dio, dio->iocb->ki_pos, 0, true);
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
        struct dio *dio = bio->bi_private;
        unsigned long remaining;
        unsigned long flags;

        /* cleanup the bio */
        dio_bio_complete(dio, bio);

        spin_lock_irqsave(&dio->bio_lock, flags);
        remaining = --dio->refcount;
        if (remaining == 1 && dio->waiter)
                wake_up_process(dio->waiter);
        spin_unlock_irqrestore(&dio->bio_lock, flags);

        if (remaining == 0) {
                if (dio->result && dio->defer_completion) {
                        INIT_WORK(&dio->complete_work, dio_aio_complete_work);
                        queue_work(dio->inode->i_sb->s_dio_done_wq,
                                   &dio->complete_work);
                } else {
                        dio_complete(dio, dio->iocb->ki_pos, 0, true);
                }
        }
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
        struct dio *dio = bio->bi_private;
        unsigned long flags;

        spin_lock_irqsave(&dio->bio_lock, flags);
        bio->bi_private = dio->bio_list;
        dio->bio_list = bio;
        if (--dio->refcount == 1 && dio->waiter)
                wake_up_process(dio->waiter);
        spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 * @error: Error if there was one
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio, int error)
{
        struct dio *dio = bio->bi_private;

        if (dio->is_async)
                dio_bio_end_aio(bio);
        else
                dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);
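
/*
 * Hypothetical usage sketch (names below are invented for illustration):
 * a filesystem with its own dio_submit_t typically stashes the bio it was
 * handed (whose ->bi_private already points at the struct dio), performs
 * its own I/O and verification, and then lets direct-io finish up:
 *
 *      static void myfs_dio_read_done(struct myfs_dio_private *dip, int err)
 *      {
 *              struct bio *dio_bio = dip->dio_bio;  (bio from sdio->submit_io)
 *
 *              kfree(dip);
 *              dio_bio->bi_error = err;
 *              dio_end_io(dio_bio, err);
 *      }
 */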

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
              struct block_device *bdev,
              sector_t first_sector, int nr_vecs)
{
        struct bio *bio;

        /*
         * bio_alloc() is guaranteed to return a bio when called with
         * __GFP_RECLAIM and we request a valid number of vectors.
         */
        bio = bio_alloc(GFP_KERNEL, nr_vecs);

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = first_sector;
        if (dio->is_async)
                bio->bi_end_io = dio_bio_end_aio;
        else
                bio->bi_end_io = dio_bio_end_io;

        sdio->bio = bio;
        sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
        struct bio *bio = sdio->bio;
        unsigned long flags;

        bio->bi_private = dio;

        spin_lock_irqsave(&dio->bio_lock, flags);
        dio->refcount++;
        spin_unlock_irqrestore(&dio->bio_lock, flags);

        if (dio->is_async && dio->rw == READ && dio->should_dirty)
                bio_set_pages_dirty(bio);

        dio->bio_bdev = bio->bi_bdev;

        if (sdio->submit_io) {
                sdio->submit_io(dio->rw, bio, dio->inode,
                               sdio->logical_offset_in_bio);
                dio->bio_cookie = BLK_QC_T_NONE;
        } else
                dio->bio_cookie = submit_bio(dio->rw, bio);

        sdio->bio = NULL;
        sdio->boundary = 0;
        sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
        while (sdio->head < sdio->tail)
                put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
        unsigned long flags;
        struct bio *bio = NULL;

        spin_lock_irqsave(&dio->bio_lock, flags);

        /*
         * Wait as long as the list is empty and there are bios in flight.  bio
         * completion drops the count, maybe adds to the list, and wakes while
         * holding the bio_lock so we don't need set_current_state()'s barrier
         * and can call it after testing our condition.
         */
        while (dio->refcount > 1 && dio->bio_list == NULL) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                dio->waiter = current;
                spin_unlock_irqrestore(&dio->bio_lock, flags);
                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
                        io_schedule();
                /* wake up sets us TASK_RUNNING */
                spin_lock_irqsave(&dio->bio_lock, flags);
                dio->waiter = NULL;
        }
        if (dio->bio_list) {
                bio = dio->bio_list;
                dio->bio_list = bio->bi_private;
        }
        spin_unlock_irqrestore(&dio->bio_lock, flags);
        return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
        struct bio_vec *bvec;
        unsigned i;
        int err;

        if (bio->bi_error)
                dio->io_error = -EIO;

        if (dio->is_async && dio->rw == READ && dio->should_dirty) {
                err = bio->bi_error;
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;

                        if (dio->rw == READ && !PageCompound(page) &&
                                        dio->should_dirty)
                                set_page_dirty_lock(page);
                        put_page(page);
                }
                err = bio->bi_error;
                bio_put(bio);
        }
        return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be returned via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
        struct bio *bio;
        do {
                bio = dio_await_one(dio);
                if (bio)
                        dio_bio_complete(dio, bio);
        } while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
        int ret = 0;

        if (sdio->reap_counter++ >= 64) {
                while (dio->bio_list) {
                        unsigned long flags;
                        struct bio *bio;
                        int ret2;

                        spin_lock_irqsave(&dio->bio_lock, flags);
                        bio = dio->bio_list;
                        dio->bio_list = bio->bi_private;
                        spin_unlock_irqrestore(&dio->bio_lock, flags);
                        ret2 = dio_bio_complete(dio, bio);
                        if (ret == 0)
                                ret = ret2;
                }
                sdio->reap_counter = 0;
        }
        return ret;
}

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating the workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
static int sb_init_dio_done_wq(struct super_block *sb)
{
        struct workqueue_struct *old;
        struct workqueue_struct *wq = alloc_workqueue("dio/%s",
                                                      WQ_MEM_RECLAIM, 0,
                                                      sb->s_id);
        if (!wq)
                return -ENOMEM;
        /*
         * This has to be atomic as more DIOs can race to create the workqueue
         */
        old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
        /* Someone created workqueue before us? Free ours... */
        if (old)
                destroy_workqueue(wq);
        return 0;
}

static int dio_set_defer_completion(struct dio *dio)
{
        struct super_block *sb = dio->inode->i_sb;

        if (dio->defer_completion)
                return 0;
        dio->defer_completion = true;
        if (!sb->s_dio_done_wq)
                return sb_init_dio_done_wq(sb);
        return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
                           struct buffer_head *map_bh)
{
        int ret;
        sector_t fs_startblk;   /* Into file, in filesystem-sized blocks */
        sector_t fs_endblk;     /* Into file, in filesystem-sized blocks */
        unsigned long fs_count; /* Number of filesystem-sized blocks */
        int create;
        unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

        /*
         * If there was a memory error and we've overwritten all the
         * mapped blocks then we can now return that memory error
         */
        ret = dio->page_errors;
        if (ret == 0) {
                BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
                fs_startblk = sdio->block_in_file >> sdio->blkfactor;
                fs_endblk = (sdio->final_block_in_request - 1) >>
                                        sdio->blkfactor;
                fs_count = fs_endblk - fs_startblk + 1;

                map_bh->b_state = 0;
                map_bh->b_size = fs_count << i_blkbits;

                /*
                 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
                 * forbid block creations: only overwrites are permitted.
                 * We will return early to the caller once we see an
                 * unmapped buffer head returned, and the caller will fall
                 * back to buffered I/O.
                 *
                 * Otherwise the decision is left to the get_blocks method,
                 * which may decide to handle it or also return an unmapped
                 * buffer head.
                 */
                create = dio->rw & WRITE;
                if (dio->flags & DIO_SKIP_HOLES) {
                        if (sdio->block_in_file < (i_size_read(dio->inode) >>
                                                        sdio->blkbits))
                                create = 0;
                }

                ret = (*sdio->get_block)(dio->inode, fs_startblk,
                                                map_bh, create);

                /* Store for completion */
                dio->private = map_bh->b_private;

                if (ret == 0 && buffer_defer_completion(map_bh))
                        ret = dio_set_defer_completion(dio);
        }
        return ret;
}
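
/*
 * For reference, the simplest instance of the get_block contract described
 * above is the raw block device's, which accepts the whole request as one
 * contiguous 1:1 mapping (illustrative sketch modelled on fs/block_dev.c,
 * not part of this file):
 *
 *      static int blkdev_get_block(struct inode *inode, sector_t iblock,
 *                      struct buffer_head *bh, int create)
 *      {
 *              bh->b_bdev = I_BDEV(inode);
 *              bh->b_blocknr = iblock;
 *              set_buffer_mapped(bh);
 *              return 0;
 *      }
 *
 * A real filesystem additionally trims bh->b_size to the extent it actually
 * mapped and calls set_buffer_new() when it allocated blocks.
 */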

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
                sector_t start_sector, struct buffer_head *map_bh)
{
        sector_t sector;
        int ret, nr_pages;

        ret = dio_bio_reap(dio, sdio);
        if (ret)
                goto out;
        sector = start_sector << (sdio->blkbits - 9);
        nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
        BUG_ON(nr_pages <= 0);
        dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
        sdio->boundary = 0;
out:
        return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
        int ret;

        ret = bio_add_page(sdio->bio, sdio->cur_page,
                        sdio->cur_page_len, sdio->cur_page_offset);
        if (ret == sdio->cur_page_len) {
                /*
                 * Decrement count only if we are done with this page
                 */
                if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
                        sdio->pages_in_io--;
                get_page(sdio->cur_page);
                sdio->final_block_in_bio = sdio->cur_page_block +
                        (sdio->cur_page_len >> sdio->blkbits);
                ret = 0;
        } else {
                ret = 1;
        }
        return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
                struct buffer_head *map_bh)
{
        int ret = 0;

        if (sdio->bio) {
                loff_t cur_offset = sdio->cur_page_fs_offset;
                loff_t bio_next_offset = sdio->logical_offset_in_bio +
                        sdio->bio->bi_iter.bi_size;

                /*
                 * See whether this new request is contiguous with the old.
                 *
                 * Btrfs cannot handle having logically non-contiguous requests
                 * submitted.  For example if you have
                 *
                 * Logical:  [0-4095][HOLE][8192-12287]
                 * Physical: [0-4095]      [4096-8191]
                 *
                 * We cannot submit those pages together as one BIO.  So if our
                 * current logical offset in the file does not equal what would
                 * be the next logical offset in the bio, submit the bio we
                 * have.
                 */
                if (sdio->final_block_in_bio != sdio->cur_page_block ||
                    cur_offset != bio_next_offset)
                        dio_bio_submit(dio, sdio);
        }

        if (sdio->bio == NULL) {
                ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
                if (ret)
                        goto out;
        }

        if (dio_bio_add_page(sdio) != 0) {
                dio_bio_submit(dio, sdio);
                ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
                if (ret == 0) {
                        ret = dio_bio_add_page(sdio);
                        BUG_ON(ret != 0);
                }
        }
out:
        return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break
 * it.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
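
/*
 * Illustrative example (assuming 512-byte dio blocks): two successive
 * chunks at offsets 0 and 512 of the same page that map to adjacent disk
 * blocks arrive as two calls; the second call simply grows cur_page_len
 * from 512 to 1024 instead of sending the page and starting a new one.
 */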
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
                    unsigned offset, unsigned len, sector_t blocknr,
                    struct buffer_head *map_bh)
{
        int ret = 0;

        if (dio->rw & WRITE) {
                /*
                 * Read accounting is performed in submit_bio()
                 */
                task_io_account_write(len);
        }

        /*
         * Can we just grow the current page's presence in the dio?
         */
        if (sdio->cur_page == page &&
            sdio->cur_page_offset + sdio->cur_page_len == offset &&
            sdio->cur_page_block +
            (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
                sdio->cur_page_len += len;
                goto out;
        }

        /*
         * If there's a deferred page already there then send it.
         */
        if (sdio->cur_page) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
                put_page(sdio->cur_page);
                sdio->cur_page = NULL;
                if (ret)
                        return ret;
        }

        get_page(page);         /* It is in dio */
        sdio->cur_page = page;
        sdio->cur_page_offset = offset;
        sdio->cur_page_len = len;
        sdio->cur_page_block = blocknr;
        sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
        /*
         * If sdio->boundary then we want to schedule the IO now to
         * avoid metadata seeks.  If dio_send_cur_page() failed it may
         * have left sdio->bio NULL, in which case there is nothing to
         * submit.
         */
        if (sdio->boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
                if (sdio->bio)
                        dio_bio_submit(dio, sdio);
                put_page(sdio->cur_page);
                sdio->cur_page = NULL;
        }
        return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new.
 */
static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
{
        unsigned i;
        unsigned nblocks;

        nblocks = map_bh->b_size >> dio->inode->i_blkbits;

        for (i = 0; i < nblocks; i++) {
                unmap_underlying_metadata(map_bh->b_bdev,
                                          map_bh->b_blocknr + i);
        }
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill in the unused portion of the
 * block with zeros.  This happens only if the user buffer, file offset or
 * IO length is not a multiple of the filesystem block size.
 *
 * 'end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
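
/*
 * Worked example, assuming 512-byte dio blocks inside 4096-byte fs blocks
 * (blkfactor == 3): a write into a newly allocated fs block that starts
 * 1024 bytes into the block has block_in_file & 7 == 2, so two dio blocks
 * (1024 bytes) are zeroed at the start; if the write also ends 1024 bytes
 * into an fs block, 8 - 2 == 6 dio blocks (3072 bytes) are zeroed at the
 * end.
 */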
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
                int end, struct buffer_head *map_bh)
{
        unsigned dio_blocks_per_fs_block;
        unsigned this_chunk_blocks;     /* In dio_blocks */
        unsigned this_chunk_bytes;
        struct page *page;

        sdio->start_zero_done = 1;
        if (!sdio->blkfactor || !buffer_new(map_bh))
                return;

        dio_blocks_per_fs_block = 1 << sdio->blkfactor;
        this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

        if (!this_chunk_blocks)
                return;

        /*
         * We need to zero out part of an fs block.  It is either at the
         * beginning or the end of the fs block.
         */
        if (end)
                this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

        this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

        page = ZERO_PAGE(0);
        if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
                                sdio->next_block_for_io, map_bh))
                return;

        sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from direct IO against a file,
 * because we can happily perform page-sized but 512-byte aligned IOs.  It is
 * important that blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
                        struct buffer_head *map_bh)
{
        const unsigned blkbits = sdio->blkbits;
        int ret = 0;

        while (sdio->block_in_file < sdio->final_block_in_request) {
                struct page *page;
                size_t from, to;

                page = dio_get_page(dio, sdio);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
                from = sdio->head ? 0 : sdio->from;
                to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
                sdio->head++;

                while (from < to) {
                        unsigned this_chunk_bytes;      /* # of bytes mapped */
                        unsigned this_chunk_blocks;     /* # of blocks */
                        unsigned u;

                        if (sdio->blocks_available == 0) {
                                /*
                                 * Need to go and map some more disk
                                 */
                                unsigned long blkmask;
                                unsigned long dio_remainder;

                                ret = get_more_blocks(dio, sdio, map_bh);
                                if (ret) {
                                        put_page(page);
                                        goto out;
                                }
                                if (!buffer_mapped(map_bh))
                                        goto do_holes;

                                sdio->blocks_available =
                                                map_bh->b_size >> sdio->blkbits;
                                sdio->next_block_for_io =
                                        map_bh->b_blocknr << sdio->blkfactor;
                                if (buffer_new(map_bh))
                                        clean_blockdev_aliases(dio, map_bh);

                                if (!sdio->blkfactor)
                                        goto do_holes;

                                blkmask = (1 << sdio->blkfactor) - 1;
                                dio_remainder = (sdio->block_in_file & blkmask);

                                /*
                                 * If we are at the start of IO and that IO
                                 * starts partway into a fs-block,
                                 * dio_remainder will be non-zero.  If the IO
                                 * is a read then we can simply advance the IO
                                 * cursor to the first block which is to be
                                 * read.  But if the IO is a write and the
                                 * block was newly allocated we cannot do that;
                                 * the start of the fs block must be zeroed out
                                 * on-disk.
                                 */
                                if (!buffer_new(map_bh))
                                        sdio->next_block_for_io += dio_remainder;
                                sdio->blocks_available -= dio_remainder;
                        }
do_holes:
                        /* Handle holes */
                        if (!buffer_mapped(map_bh)) {
                                loff_t i_size_aligned;

                                /* AKPM: eargh, -ENOTBLK is a hack */
                                if (dio->rw & WRITE) {
                                        put_page(page);
                                        return -ENOTBLK;
                                }

                                /*
                                 * Be sure to account for a partial block as the
                                 * last block in the file
                                 */
                                i_size_aligned = ALIGN(i_size_read(dio->inode),
                                                        1 << blkbits);
                                if (sdio->block_in_file >=
                                                i_size_aligned >> blkbits) {
                                        /* We hit eof */
                                        put_page(page);
                                        goto out;
                                }
                                zero_user(page, from, 1 << blkbits);
                                sdio->block_in_file++;
                                from += 1 << blkbits;
                                dio->result += 1 << blkbits;
                                goto next_block;
                        }

                        /*
                         * If we're performing IO which has an alignment which
                         * is finer than the underlying fs, go check to see if
                         * we must zero out the start of this block.
                         */
                        if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
                                dio_zero_block(dio, sdio, 0, map_bh);

                        /*
                         * Work out, in this_chunk_blocks, how much disk we
                         * can add to this page
                         */
                        this_chunk_blocks = sdio->blocks_available;
                        u = (to - from) >> blkbits;
                        if (this_chunk_blocks > u)
                                this_chunk_blocks = u;
                        u = sdio->final_block_in_request - sdio->block_in_file;
                        if (this_chunk_blocks > u)
                                this_chunk_blocks = u;
                        this_chunk_bytes = this_chunk_blocks << blkbits;
                        BUG_ON(this_chunk_bytes == 0);

                        if (this_chunk_blocks == sdio->blocks_available)
                                sdio->boundary = buffer_boundary(map_bh);
                        ret = submit_page_section(dio, sdio, page,
                                                  from,
                                                  this_chunk_bytes,
                                                  sdio->next_block_for_io,
                                                  map_bh);
                        if (ret) {
                                put_page(page);
                                goto out;
                        }
                        sdio->next_block_for_io += this_chunk_blocks;

                        sdio->block_in_file += this_chunk_blocks;
                        from += this_chunk_bytes;
                        dio->result += this_chunk_bytes;
                        sdio->blocks_available -= this_chunk_blocks;
next_block:
                        BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
                        if (sdio->block_in_file == sdio->final_block_in_request)
                                break;
                }

                /* Drop the ref which was taken in get_user_pages() */
                put_page(page);
        }
out:
        return ret;
}

static inline int drop_refcount(struct dio *dio)
{
        int ret2;
        unsigned long flags;

        /*
         * Sync will always be dropping the final ref and completing the
         * operation.  AIO can do so if it was a broken operation as described
         * above, or in fact if all the bios race to complete before we get
         * here.  In that case dio_complete() translates the EIOCBQUEUED into
         * the proper return code that the caller will hand to ->complete().
         *
         * This is managed by the bio_lock instead of being an atomic_t so that
         * completion paths can drop their ref and use the remaining count to
         * decide to wake the submission path atomically.
         */
        spin_lock_irqsave(&dio->bio_lock, flags);
        ret2 = --dio->refcount;
        spin_unlock_irqrestore(&dio->bio_lock, flags);
        return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we incremented the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined. Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code. This is important
 * for the whole file.
 */
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                      struct block_device *bdev, struct iov_iter *iter,
                      loff_t offset, get_block_t get_block, dio_iodone_t end_io,
                      dio_submit_t submit_io, int flags)
{
        unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
        unsigned blkbits = i_blkbits;
        unsigned blocksize_mask = (1 << blkbits) - 1;
        ssize_t retval = -EINVAL;
        size_t count = iov_iter_count(iter);
        loff_t end = offset + count;
        struct dio *dio;
        struct dio_submit sdio = { 0, };
        struct buffer_head map_bh = { 0, };
        struct blk_plug plug;
        unsigned long align = offset | iov_iter_alignment(iter);

        /*
         * Avoid references to bdev if not absolutely needed to give
         * the early prefetch in the caller enough time.
         */

        if (align & blocksize_mask) {
                if (bdev)
                        blkbits = blksize_bits(bdev_logical_block_size(bdev));
                blocksize_mask = (1 << blkbits) - 1;
                if (align & blocksize_mask)
                        goto out;
        }

        /* watch out for a 0 len io from a tricksy fs */
        if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
                return 0;

        dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
        retval = -ENOMEM;
        if (!dio)
                goto out;
        /*
         * Believe it or not, zeroing out the page array caused a .5%
         * performance regression in a database benchmark.  So, we take
         * care to only zero out what's needed.
         */
        memset(dio, 0, offsetof(struct dio, pages));

        dio->flags = flags;
        if (dio->flags & DIO_LOCKING) {
                if (iov_iter_rw(iter) == READ) {
                        struct address_space *mapping =
                                        iocb->ki_filp->f_mapping;

                        /* will be released by direct_io_worker */
                        inode_lock(inode);

                        retval = filemap_write_and_wait_range(mapping, offset,
                                                              end - 1);
                        if (retval) {
                                inode_unlock(inode);
                                kmem_cache_free(dio_cache, dio);
                                goto out;
                        }
                }
        }

        /* Once we have sampled i_size, check for reads beyond EOF */
        dio->i_size = i_size_read(inode);
        if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
                if (dio->flags & DIO_LOCKING)
                        inode_unlock(inode);
                kmem_cache_free(dio_cache, dio);
                retval = 0;
                goto out;
        }

        /*
         * For file extending writes updating i_size before data writeouts
         * complete can expose uninitialized blocks in dumb filesystems.
         * In that case we need to wait for I/O completion even if asked
         * for an asynchronous write.
         */
        if (is_sync_kiocb(iocb))
                dio->is_async = false;
        else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
                 iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
                dio->is_async = false;
        else
                dio->is_async = true;

        dio->inode = inode;
        dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;

        /*
         * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
         * so that we can call ->fsync.
         */
        if (dio->is_async && iov_iter_rw(iter) == WRITE &&
            ((iocb->ki_filp->f_flags & O_DSYNC) ||
             IS_SYNC(iocb->ki_filp->f_mapping->host))) {
                retval = dio_set_defer_completion(dio);
                if (retval) {
                        /*
                         * We grab i_mutex only for reads so we don't have
                         * to release it here
                         */
                        kmem_cache_free(dio_cache, dio);
                        goto out;
                }
        }

        /*
         * Will be decremented at I/O completion time.
         */
        if (!(dio->flags & DIO_SKIP_DIO_COUNT))
                inode_dio_begin(inode);

        retval = 0;
        sdio.blkbits = blkbits;
        sdio.blkfactor = i_blkbits - blkbits;
        sdio.block_in_file = offset >> blkbits;

        sdio.get_block = get_block;
        dio->end_io = end_io;
        sdio.submit_io = submit_io;
        sdio.final_block_in_bio = -1;
        sdio.next_block_for_io = -1;

        dio->iocb = iocb;

        spin_lock_init(&dio->bio_lock);
        dio->refcount = 1;

        dio->should_dirty = (iter->type == ITER_IOVEC);
        sdio.iter = iter;
        sdio.final_block_in_request =
                (offset + iov_iter_count(iter)) >> blkbits;

        /*
         * In case of non-aligned buffers, we may need 2 more
         * pages since we need to zero out first and last block.
         */
        if (unlikely(sdio.blkfactor))
                sdio.pages_in_io = 2;

        sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);

        blk_start_plug(&plug);

        retval = do_direct_IO(dio, &sdio, &map_bh);
        if (retval)
                dio_cleanup(dio, &sdio);

        if (retval == -ENOTBLK) {
                /*
                 * The remaining part of the request will be
                 * handled by buffered I/O when we return
                 */
                retval = 0;
        }
        /*
         * There may be some unwritten disk at the end of a part-written
         * fs-block-sized block.  Go zero that now.
         */
        dio_zero_block(dio, &sdio, 1, &map_bh);

        if (sdio.cur_page) {
                ssize_t ret2;

                ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
                if (retval == 0)
                        retval = ret2;
                put_page(sdio.cur_page);
                sdio.cur_page = NULL;
        }
        if (sdio.bio)
                dio_bio_submit(dio, &sdio);

        blk_finish_plug(&plug);

        /*
         * It is possible that we return short IO due to end of file.
         * In that case, we need to release all the pages we got hold on.
         */
        dio_cleanup(dio, &sdio);

        /*
         * All block lookups have been performed. For READ requests
         * we can let i_mutex go now that it has achieved its purpose
         * of protecting us from looking up uninitialized blocks.
         */
        if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
                inode_unlock(dio->inode);

        /*
         * The only time we want to leave bios in flight is when a successful
         * partial aio read or full aio write has been set up.  In that case
         * bio completion will call aio_complete.  The only time it's safe to
         * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
         * This had *better* be the only place that raises -EIOCBQUEUED.
         */
        BUG_ON(retval == -EIOCBQUEUED);
        if (dio->is_async && retval == 0 && dio->result &&
            (iov_iter_rw(iter) == READ || dio->result == count))
                retval = -EIOCBQUEUED;
        else
                dio_await_completion(dio);

        if (drop_refcount(dio) == 0) {
                retval = dio_complete(dio, offset, retval, false);
        } else
                BUG_ON(retval != -EIOCBQUEUED);

out:
        return retval;
}

ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                             struct block_device *bdev, struct iov_iter *iter,
                             loff_t offset, get_block_t get_block,
                             dio_iodone_t end_io, dio_submit_t submit_io,
                             int flags)
{
        /*
         * The block device state is needed in the end to finally
         * submit everything.  Since it's likely to be cache cold
         * prefetch it here as first thing to hide some of the
         * latency.
         *
         * Attempt to prefetch the pieces we likely need later.
         */
        prefetch(&bdev->bd_disk->part_tbl);
        prefetch(bdev->bd_queue);
        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);

        return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
                                     end_io, submit_io, flags);
}

EXPORT_SYMBOL(__blockdev_direct_IO);
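
/*
 * Usage sketch (hypothetical filesystem, for illustration only): most
 * callers reach this code through the blockdev_direct_IO() wrapper in
 * <linux/fs.h> from their ->direct_IO method, roughly like so:
 *
 *      static ssize_t myfs_direct_IO(struct kiocb *iocb,
 *                                    struct iov_iter *iter, loff_t offset)
 *      {
 *              struct inode *inode = file_inode(iocb->ki_filp);
 *
 *              return blockdev_direct_IO(iocb, inode, iter, offset,
 *                                        myfs_get_block);
 *      }
 */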

static __init int dio_init(void)
{
        dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
        return 0;
}
module_init(dio_init)