linux/fs/ext4/page-io.c
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

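/* Set up the slab cache used to allocate ext4_io_end_t structures. */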
int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

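/*
 * Finish I/O on a bio: for each page segment, clear the async_write flag
 * on the buffers covered by the bio, report any I/O error, and end page
 * writeback once no buffer in the page is still under async write.
 */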
static void ext4_finish_bio(struct bio *bio)
{
        int i;
        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
                unsigned bio_end = bio_start + bvec->bv_len;
                unsigned under_io = 0;
                unsigned long flags;

                if (!page)
                        continue;

                if (error) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                }
                bh = head = page_buffers(page);
                /*
                 * We check all buffers in the page under BH_Uptodate_Lock
                 * to avoid races with other end io clearing async_write flags
                 */
                local_irq_save(flags);
                bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (error)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
                local_irq_restore(flags);
                if (!under_io)
                        end_page_writeback(page);
        }
}

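/*
 * Free an io_end structure: drop the inode's outstanding ioend count
 * (waking up anyone waiting for it to reach zero), finish and release
 * all bios chained off io_end->bio, and return the io_end to the slab.
 */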
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

        if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
                wake_up_all(ext4_ioend_wq(io_end->inode));

        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        kmem_cache_free(io_end_cachep, io_end);
}

static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
        struct inode *inode = io_end->inode;

        io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
                wake_up_all(ext4_ioend_wq(inode));
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_release_io_end()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        handle_t *handle = io->handle;
        int ret = 0;

        ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        io->handle = NULL;      /* Following call will use up the handle */
        ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss!  "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
        ext4_clear_io_unwritten_flag(io);
        ext4_release_io_end(io);
        return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef  EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io, head, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                            io, inode->i_ino, io0, io1);
        }
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;

        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        wq = sbi->rsv_conversion_wq;
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

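/*
 * Pull all completed io_ends off the list under i_completed_io_lock and
 * convert their unwritten extents, returning the first error encountered.
 */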
static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io->list);

                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

/*
 * work on completed IO, to convert unwritten extents to written extents
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

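/*
 * Allocate a zeroed io_end with a single reference and account it
 * against the inode's count of outstanding ioends.
 */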
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
                atomic_set(&io->count, 1);
        }
        return io;
}

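/*
 * Drop a reference to an io_end. If this was the last reference and the
 * io_end still needs unwritten extent conversion, queue it on the per-inode
 * completed-IO list for the conversion workqueue; otherwise release it now.
 */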
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (atomic_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}

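/*
 * Drop a reference to an io_end, doing any pending unwritten extent
 * conversion synchronously in the caller's context on the last put.
 */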
int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (atomic_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_extents(io_end->handle,
                                                io_end->inode, io_end->offset,
                                                io_end->size);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        atomic_inc(&io_end->count);
        return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio, int error)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_iter.bi_sector;

        BUG_ON(!io_end);
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;

        if (error) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             error, inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
                mapping_set_error(inode->i_mapping, error);
        }

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
                ext4_put_io_end_defer(io_end);
        } else {
                /*
                 * Drop io_end reference early. Inode can get freed once
                 * we finish the bio.
                 */
                ext4_put_io_end_defer(io_end);
                ext4_finish_bio(bio);
                bio_put(bio);
        }
}

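/*
 * Submit the bio built up so far, if any. The extra bio_get()/bio_put()
 * pair keeps the bio alive so its flags can be checked after submit_bio().
 */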
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                bio_get(io->io_bio);
                submit_bio(io->io_op, io->io_bio);
                BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
        io->io_bio = NULL;
        io->io_end = NULL;
}

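/*
 * Allocate a new bio starting at the block covered by 'bh' and point its
 * completion at ext4_end_bio(), taking a reference to the current io_end.
 */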
static int io_submit_init_bio(struct ext4_io_submit *io,
                              struct buffer_head *bh)
{
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        if (!bio)
                return -ENOMEM;
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        return 0;
}

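/*
 * Add a buffer to the bio being built, submitting the current bio first
 * if the buffer is not contiguous with it or the bio is already full.
 */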
static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
                            struct buffer_head *bh)
{
        int ret;

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init_bio(io, bh);
                if (ret)
                        return ret;
        }
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        io->io_next_block++;
        return 0;
}

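/*
 * Write out the buffers of a page that fall within 'len'. Buffers are
 * marked async_write in a first pass and only then added to bios, so that
 * page writeback cannot be ended while later buffers are still being
 * submitted.
 */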
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc,
                        bool keep_towrite)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, blocksize;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_submitted = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (keep_towrite)
                set_page_writeback_keepwrite(page);
        else
                set_page_writeback(page);
        ClearPageError(page);

        /*
         * Comments copied from block_write_full_page:
         *
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
         * end_page_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = page_buffers(page);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        if (io->io_bio)
                                ext4_io_submit(io);
                        continue;
                }
                if (buffer_new(bh)) {
                        clear_buffer_new(bh);
                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                }
                set_buffer_async_write(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Now submit buffers to write */
        bh = head = page_buffers(page);
        do {
                if (!buffer_async_write(bh))
                        continue;
                ret = io_submit_add_bh(io, inode, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        redirty_page_for_writepage(wbc, page);
                        break;
                }
                nr_submitted++;
                clear_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Error stopped previous loop? Clean up buffers... */
        if (ret) {
                do {
                        clear_buffer_async_write(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        unlock_page(page);
        /* Nothing submitted - we have to end page writeback */
        if (!nr_submitted)
                end_page_writeback(page);
        return ret;
}