// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

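/*
 * io_end_cachep backs the ext4_io_end structures that track the state of
 * a set of writeback bios; io_end_vec_cachep backs the ext4_io_end_vec
 * ranges hanging off an io_end that may need unwritten->written extent
 * conversion once IO completes.  An io_end is reference counted: it is
 * born with a count of one in ext4_init_io_end(), each bio takes an
 * extra reference via ext4_get_io_end(), and the last
 * ext4_put_io_end{,_defer}() frees it.
 */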
int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;

        io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
        if (io_end_vec_cachep == NULL) {
                kmem_cache_destroy(io_end_cachep);
                return -ENOMEM;
        }
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
        kmem_cache_destroy(io_end_vec_cachep);
}

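/*
 * Allocate a new io_end_vec and queue it at the tail of io_end->list_vec.
 * The caller is expected to fill in the file range (->offset and ->size)
 * that will need unwritten extent conversion at IO completion time.
 */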
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
        struct ext4_io_end_vec *io_end_vec;

        io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
        if (!io_end_vec)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&io_end_vec->list);
        list_add_tail(&io_end_vec->list, &io_end->list_vec);
        return io_end_vec;
}

static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
        struct ext4_io_end_vec *io_end_vec, *tmp;

        if (list_empty(&io_end->list_vec))
                return;
        list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
                list_del(&io_end_vec->list);
                kmem_cache_free(io_end_vec_cachep, io_end_vec);
        }
}

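/*
 * Return the most recently added io_end_vec.  The writeback code extends
 * the ->size of this last range as it maps more blocks into the same
 * io_end, rather than allocating a fresh vec per block, so the list is
 * never empty when this is called.
 */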
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
        BUG_ON(list_empty(&io_end->list_vec));
        return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This provides
 * compatibility with dmesg scrapers that look for a specific buffer I/O
 * error message.  We really need a unified error reporting structure to
 * userspace ala Digital Unix's uerf system, but it's probably not going
 * to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
                           bh->b_bdev,
                           (unsigned long long)bh->b_blocknr);
}

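/*
 * Finish writeback accounting for all pages covered by @bio: clear the
 * async_write flag on every buffer the bio touched (logging an error per
 * buffer if the bio failed) and end page writeback once no buffer on the
 * page remains under IO.  If the data went out through an fscrypt bounce
 * page, the bookkeeping is done on the original pagecache page and the
 * bounce page is freed.
 */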
static void ext4_finish_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                struct page *bounce_page = NULL;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
                unsigned bio_end = bio_start + bvec->bv_len;
                unsigned under_io = 0;
                unsigned long flags;

                if (fscrypt_is_bounce_page(page)) {
                        bounce_page = page;
                        page = fscrypt_pagecache_page(bounce_page);
                }

                if (bio->bi_status) {
                        SetPageError(page);
                        mapping_set_error(page->mapping, -EIO);
                }
                bh = head = page_buffers(page);
                /*
                 * We check all buffers in the page under b_uptodate_lock
                 * to avoid races with other end io clearing async_write flags
                 */
                spin_lock_irqsave(&head->b_uptodate_lock, flags);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (bio->bi_status)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
                if (!under_io) {
                        fscrypt_free_bounce_page(bounce_page);
                        end_page_writeback(page);
                }
        }
}

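/*
 * Free an io_end whose reference count has dropped to zero.  Bios whose
 * completion was deferred while unwritten extent conversion was pending
 * (chained through bi_private in ext4_end_bio()) are finished here, now
 * that the conversion is done.
 */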
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        ext4_free_io_end_vec(io_end);
        kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching same part of extent tree by the
 * fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_release_io_end()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
        struct inode *inode = io_end->inode;
        handle_t *handle = io_end->handle;
        int ret = 0;

        ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

        io_end->handle = NULL;  /* Following call will use up the handle */
        ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
        if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss!  "
                         "(inode %lu, error %d)", inode->i_ino, ret);
        }
        ext4_clear_io_unwritten_flag(io_end);
        ext4_release_io_end(io_end);
        return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef  EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io_end, *io_end0, *io_end1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io_end, head, list) {
                cur = &io_end->list;
                before = cur->prev;
                io_end0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io_end1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                            io_end, inode->i_ino, io_end0, io_end1);
        }
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;

        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        wq = sbi->rsv_conversion_wq;
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io_end;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io_end = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io_end->list);

                err = ext4_end_io_end(io_end);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

/*
 * work on completed IO, to convert unwritten extents to extents
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

        if (io_end) {
                io_end->inode = inode;
                INIT_LIST_HEAD(&io_end->list);
                INIT_LIST_HEAD(&io_end->list_vec);
                atomic_set(&io_end->count, 1);
        }
        return io_end;
}

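/*
 * Two ways to drop an io_end reference: ext4_put_io_end_defer() is safe
 * from bio completion context and punts any pending unwritten extent
 * conversion to the per-sb rsv_conversion workqueue, while
 * ext4_put_io_end() runs in process context and does the conversion
 * synchronously.
 */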
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (atomic_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
                                list_empty(&io_end->list_vec)) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (atomic_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_io_end_vec(io_end->handle,
                                                                io_end);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        atomic_inc(&io_end->count);
        return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_iter.bi_sector;
        char b[BDEVNAME_SIZE];

        if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %llu len %u err %d\n",
                      bio_devname(bio, b),
                      (unsigned long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
                      bio->bi_status)) {
                ext4_finish_bio(bio);
                bio_put(bio);
                return;
        }
        bio->bi_end_io = NULL;

        if (bio->bi_status) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "starting block %llu",
                             bio->bi_status, inode->i_ino,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
                mapping_set_error(inode->i_mapping,
                                blk_status_to_errno(bio->bi_status));
        }

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
                ext4_put_io_end_defer(io_end);
        } else {
                /*
                 * Drop io_end reference early. Inode can get freed once
                 * we finish the bio.
                 */
                ext4_put_io_end_defer(io_end);
                ext4_finish_bio(bio);
                bio_put(bio);
        }
}

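/*
 * How the pieces above fit together, as a rough sketch of the writeback
 * caller (loosely modelled on ext4_writepages(); error handling and the
 * extent-mapping loop are omitted here):
 *
 *	struct ext4_io_submit io;
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_KERNEL);
 *	while (there are dirty pages to write)
 *		ext4_bio_write_page(&io, page, len, keep_towrite);
 *	ext4_io_submit(&io);		(flush the last bio)
 *	ext4_put_io_end(io.io_end);	(drop the initial reference)
 */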
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
                                  REQ_SYNC : 0;
                io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
                bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
                submit_bio(io->io_bio);
        }
        io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_wbc = wbc;
        io->io_bio = NULL;
        io->io_end = NULL;
}

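/*
 * Start a new bio for @bh: room for up to BIO_MAX_VECS pages, positioned
 * at the buffer's block, with completion routed to ext4_end_bio().  The
 * bio pins the current io_end through bi_private (ext4_get_io_end()), so
 * the io_end cannot go away while the bio is in flight.
 */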
static void io_submit_init_bio(struct ext4_io_submit *io,
                               struct buffer_head *bh)
{
        struct bio *bio;

        /*
         * bio_alloc will _always_ be able to allocate a bio if
         * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
         */
        bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
        fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio_set_dev(bio, bh->b_bdev);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        wbc_init_bio(io->io_wbc, bio);
}

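/*
 * Append @bh's data to the bio under construction.  If the block is not
 * contiguous with the current bio, the encryption contexts cannot be
 * merged, or the bio is already full (bio_add_page() adds less than
 * b_size), submit the current bio and retry with a fresh one.
 */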
static void io_submit_add_bh(struct ext4_io_submit *io,
                             struct inode *inode,
                             struct page *page,
                             struct buffer_head *bh)
{
        int ret;

        if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
                           !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                io_submit_init_bio(io, bh);
                io->io_bio->bi_write_hint = inode->i_write_hint;
        }
        ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
        io->io_next_block++;
}

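/*
 * Write out (up to @len bytes of) a locked page whose dirty bit the
 * caller has already cleared (e.g. via clear_page_dirty_for_io()).  The
 * page is marked under writeback, buffers needing IO are flagged
 * async_write, data is encrypted into a bounce page when the inode uses
 * fs-layer fscrypt, and the buffers are queued with io_submit_add_bh().
 * The page is unlocked on return; writeback is ended here only if
 * nothing was submitted.
 */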
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        bool keep_towrite)
{
        struct page *bounce_page = NULL;
        struct inode *inode = page->mapping->host;
        unsigned block_start;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_submitted = 0;
        int nr_to_submit = 0;
        struct writeback_control *wbc = io->io_wbc;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (keep_towrite)
                set_page_writeback_keepwrite(page);
        else
                set_page_writeback(page);
        ClearPageError(page);

        /*
         * Comments copied from block_write_full_page:
         *
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
         * end_page_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = page_buffers(page);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        if (io->io_bio)
                                ext4_io_submit(io);
                        continue;
                }
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                set_buffer_async_write(bh);
                nr_to_submit++;
        } while ((bh = bh->b_this_page) != head);

        bh = head = page_buffers(page);

        /*
         * If any blocks are being written to an encrypted file, encrypt them
         * into a bounce page.  For simplicity, just encrypt until the last
         * block which might be needed.  This may cause some unneeded blocks
         * (e.g. holes) to be unnecessarily encrypted, but this is rare and
         * can't happen in the common case of blocksize == PAGE_SIZE.
         */
        if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
                gfp_t gfp_flags = GFP_NOFS;
                unsigned int enc_bytes = round_up(len, i_blocksize(inode));

                /*
                 * Since bounce page allocation uses a mempool, we can only use
                 * a waiting mask (i.e. request guaranteed allocation) on the
                 * first page of the bio.  Otherwise it can deadlock.
                 */
                if (io->io_bio)
                        gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
        retry_encrypt:
                bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
                                                               0, gfp_flags);
                if (IS_ERR(bounce_page)) {
                        ret = PTR_ERR(bounce_page);
                        if (ret == -ENOMEM &&
                            (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
                                gfp_flags = GFP_NOFS;
                                if (io->io_bio)
                                        ext4_io_submit(io);
                                else
                                        gfp_flags |= __GFP_NOFAIL;
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto retry_encrypt;
                        }

                        printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
                        redirty_page_for_writepage(wbc, page);
                        do {
                                clear_buffer_async_write(bh);
                                bh = bh->b_this_page;
                        } while (bh != head);
                        goto unlock;
                }
        }

        /* Now submit buffers to write */
        do {
                if (!buffer_async_write(bh))
                        continue;
                io_submit_add_bh(io, inode,
                                 bounce_page ? bounce_page : page, bh);
                nr_submitted++;
                clear_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);

unlock:
        unlock_page(page);
        /* Nothing submitted - we have to end page writeback */
        if (!nr_submitted)
                end_page_writeback(page);
        return ret;
}