// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This provides
 * compatibility with dmesg scrapers that look for a specific buffer
 * I/O error message.  We really need a unified error reporting
 * structure to userspace a la Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
                           bh->b_bdev,
                           (unsigned long long)bh->b_blocknr);
}

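/*
 * Complete page writeback for the pages covered by @bio.  Every buffer
 * the bio touched gets its async_write flag cleared; on error the
 * buffer, its page and the page's mapping are marked with the failure.
 * Because a page's buffers may be spread across several bios, writeback
 * on a page is ended only once no buffer in it is still under async
 * write.  For encrypted files the I/O went to a bounce page, which is
 * released here before writeback on the pagecache page is ended.
 */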
static void ext4_finish_bio(struct bio *bio)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
                struct page *data_page = NULL;
#endif
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
                unsigned bio_end = bio_start + bvec->bv_len;
                unsigned under_io = 0;
                unsigned long flags;

                if (!page)
                        continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
                if (!page->mapping) {
                        /* The bounce data pages are unmapped. */
                        data_page = page;
                        fscrypt_pullback_bio_page(&page, false);
                }
#endif

                if (bio->bi_status) {
                        SetPageError(page);
                        mapping_set_error(page->mapping, -EIO);
                }
                bh = head = page_buffers(page);
                /*
                 * We check all buffers in the page under BH_Uptodate_Lock
                 * to avoid races with other end_io clearing async_write flags.
                 */
                local_irq_save(flags);
                bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (bio->bi_status)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
                local_irq_restore(flags);
                if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
                        if (data_page)
                                fscrypt_restore_control_page(data_page);
#endif
                        end_page_writeback(page);
                }
        }
}

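/*
 * Free an io_end.  Any bios chained onto io_end->bio by ext4_end_bio()
 * are finished and dropped first.  Callers must already have dealt with
 * unwritten extent conversion: the io_end must not carry
 * EXT4_IO_END_UNWRITTEN and its handle must have been consumed.
 */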
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (which happens from ext4_release_io_end()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        handle_t *handle = io->handle;
        int ret = 0;

        ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        io->handle = NULL;      /* Following call will use up the handle */
        ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
        if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss!  "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
        ext4_clear_io_unwritten_flag(io);
        ext4_release_io_end(io);
        return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef  EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io, head, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                            io, inode->i_ino, io0, io1);
        }
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;

        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        wq = sbi->rsv_conversion_wq;
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

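/*
 * Detach the list of completed io_ends from the inode under
 * i_completed_io_lock, then convert the unwritten extents of each one.
 * The first error hit is returned, but the whole list is still
 * processed so that every io_end gets released.
 */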
static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io->list);

                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

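/* Allocate a zeroed io_end for @inode, holding a single reference. */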
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);

        if (io) {
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
                atomic_set(&io->count, 1);
        }
        return io;
}

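/*
 * Drop a reference to an io_end.  On the last put, the io_end is
 * either released right away or, if unwritten extent conversion is
 * still pending, queued to the reserved-conversion workqueue.  This is
 * the variant used from bio completion context, where the conversion
 * itself must not be performed.
 */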
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (atomic_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}

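/*
 * Drop a reference to an io_end, converting any pending unwritten
 * extents synchronously on the last put.  Unlike
 * ext4_put_io_end_defer(), this may block and so needs process context.
 */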
int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (atomic_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_extents(io_end->handle,
                                                io_end->inode, io_end->offset,
                                                io_end->size);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        atomic_inc(&io_end->count);
        return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_iter.bi_sector;
        char b[BDEVNAME_SIZE];

        if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
                      bio_devname(bio, b),
                      (long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
                      bio->bi_status)) {
                ext4_finish_bio(bio);
                bio_put(bio);
                return;
        }
        bio->bi_end_io = NULL;

        if (bio->bi_status) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             bio->bi_status, inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
                mapping_set_error(inode->i_mapping,
                                blk_status_to_errno(bio->bi_status));
        }

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
                ext4_put_io_end_defer(io_end);
        } else {
                /*
                 * Drop io_end reference early. Inode can get freed once
                 * we finish the bio.
                 */
                ext4_put_io_end_defer(io_end);
                ext4_finish_bio(bio);
                bio_put(bio);
        }
}

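/*
 * Submit the bio accumulated in @io, if any, and forget it.  For
 * WB_SYNC_ALL writeback the bio is marked REQ_SYNC so the block layer
 * treats it as synchronous I/O.  Callers batch buffers into the bio
 * via ext4_bio_write_page() and flush it with this call.
 */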
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
                                  REQ_SYNC : 0;
                io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
                bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
                submit_bio(io->io_bio);
        }
        io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_wbc = wbc;
        io->io_bio = NULL;
        io->io_end = NULL;
}

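/*
 * Start a new bio for @bh: point it at the buffer's block device and
 * starting sector, wire up ext4_end_bio() as the completion handler,
 * and let the bio hold its own reference to the current io_end via
 * bi_private.
 */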
static int io_submit_init_bio(struct ext4_io_submit *io,
                              struct buffer_head *bh)
{
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
        if (!bio)
                return -ENOMEM;
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio_set_dev(bio, bh->b_bdev);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        wbc_init_bio(io->io_wbc, bio);
        return 0;
}

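/*
 * Add @bh to the bio under construction.  If the buffer is not
 * contiguous with the current bio, or bio_add_page() cannot take the
 * whole buffer (the bio is full), the pending bio is submitted and a
 * fresh one is started before retrying.
 */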
static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
                            struct page *page,
                            struct buffer_head *bh)
{
        int ret;

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init_bio(io, bh);
                if (ret)
                        return ret;
                io->io_bio->bi_write_hint = inode->i_write_hint;
        }
        ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        wbc_account_io(io->io_wbc, page, bh->b_size);
        io->io_next_block++;
        return 0;
}

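/*
 * Write out the part of @page covered by @len.  A first pass marks all
 * buffers needing IO with async_write (see the comment below), a
 * second pass feeds them to io_submit_add_bh().  For encrypted regular
 * files the data is first encrypted into a bounce page and that page
 * is submitted instead.  With @keep_towrite the page keeps its
 * PAGECACHE_TAG_TOWRITE tag when writeback is set.
 */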
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc,
                        bool keep_towrite)
{
        struct page *data_page = NULL;
        struct inode *inode = page->mapping->host;
        unsigned block_start;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_submitted = 0;
        int nr_to_submit = 0;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (keep_towrite)
                set_page_writeback_keepwrite(page);
        else
                set_page_writeback(page);
        ClearPageError(page);

        /*
         * Comments copied from block_write_full_page:
         *
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
         * end_page_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = page_buffers(page);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        if (io->io_bio)
                                ext4_io_submit(io);
                        continue;
                }
                if (buffer_new(bh)) {
                        clear_buffer_new(bh);
                        clean_bdev_bh_alias(bh);
                }
                set_buffer_async_write(bh);
                nr_to_submit++;
        } while ((bh = bh->b_this_page) != head);

        bh = head = page_buffers(page);

        if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
            nr_to_submit) {
                gfp_t gfp_flags = GFP_NOFS;

        retry_encrypt:
                data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
                                                page->index, gfp_flags);
                if (IS_ERR(data_page)) {
                        ret = PTR_ERR(data_page);
                        if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
                                if (io->io_bio) {
                                        ext4_io_submit(io);
                                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                                }
                                gfp_flags |= __GFP_NOFAIL;
                                goto retry_encrypt;
                        }
                        data_page = NULL;
                        goto out;
                }
        }

        /* Now submit buffers to write */
        do {
                if (!buffer_async_write(bh))
                        continue;
                ret = io_submit_add_bh(io, inode,
                                       data_page ? data_page : page, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        break;
                }
                nr_submitted++;
                clear_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Error stopped previous loop? Clean up buffers... */
        if (ret) {
        out:
                if (data_page)
                        fscrypt_restore_control_page(data_page);
                printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
                redirty_page_for_writepage(wbc, page);
                do {
                        clear_buffer_async_write(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        unlock_page(page);
        /* Nothing submitted - we have to end page writeback */
        if (!nr_submitted)
                end_page_writeback(page);
        return ret;
}