// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

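/*
 * Direct I/O is not supported for every inode: fscrypt-encrypted inodes,
 * fs-verity inodes, inodes using data journalling, and inodes with inline
 * data all need the page cache, so callers fall back to buffered I/O for
 * them.
 */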
static bool ext4_dio_supported(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
                return false;
        if (fsverity_active(inode))
                return false;
        if (ext4_should_journal_data(inode))
                return false;
        if (ext4_has_inline_data(inode))
                return false;
        return true;
}

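/*
 * Direct I/O read path: takes the inode lock shared (or returns -EAGAIN
 * under IOCB_NOWAIT), falls back to buffered I/O when direct I/O is not
 * supported for this inode, and otherwise issues the read through
 * iomap_dio_rw().
 */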
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }

        if (!ext4_dio_supported(inode)) {
                inode_unlock_shared(inode);
                /*
                 * Fall back to buffered I/O if the operation being performed
                 * on the inode is not supported by direct I/O. The IOCB_DIRECT
                 * flag needs to be cleared here in order to ensure that the
                 * direct I/O path within generic_file_read_iter() is not
                 * taken.
                 */
                iocb->ki_flags &= ~IOCB_DIRECT;
                return generic_file_read_iter(iocb, to);
        }

        ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
                           is_sync_kiocb(iocb));
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}

#ifdef CONFIG_FS_DAX
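/*
 * DAX read path: like the direct I/O path, but performed through
 * dax_iomap_rw(). IS_DAX() is rechecked under the shared inode lock since
 * the flag is stable only once the lock is held.
 */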
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it cannot
         * change anymore.
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

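/*
 * Top-level read_iter: dispatches to the DAX, direct I/O, or buffered
 * read path depending on the inode and the iocb flags.
 */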
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_read_iter(iocb, to);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_read_iter(iocb, to);

        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        unsigned long blockmask = sb->s_blocksize - 1;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return true;

        return false;
}

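/* Does this IO extend the file's in-memory or on-disk (i_disksize) size? */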
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
        if (offset + len > i_size_read(inode) ||
            offset + len > EXT4_I(inode)->i_disksize)
                return true;
        return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

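/*
 * Common write checks: reject writes to immutable inodes, run
 * generic_write_checks(), and cap writes to bitmap-format (non-extent)
 * files at s_bitmap_maxbytes. Returns the number of bytes that may be
 * written, or a negative error.
 */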
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }

        return iov_iter_count(from);
}

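/*
 * As ext4_generic_write_checks(), but additionally calls file_modified()
 * to update the timestamps and strip SUID/SGID bits before the write
 * proceeds.
 */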
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret, count;

        count = ext4_generic_write_checks(iocb, from);
        if (count <= 0)
                return count;

        ret = file_modified(iocb->ki_filp);
        if (ret)
                return ret;
        return count;
}

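/*
 * Buffered write path: performs the write through the page cache under
 * the exclusive inode lock. IOCB_NOWAIT is not supported here.
 */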
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
                                        struct iov_iter *from)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT)
                return -EOPNOTSUPP;

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        current->backing_dev_info = inode_to_bdi(inode);
        ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
        current->backing_dev_info = NULL;

out:
        inode_unlock(inode);
        if (likely(ret > 0)) {
                iocb->ki_pos += ret;
                ret = generic_write_sync(iocb, ret);
        }

        return ret;
}

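/*
 * Finish off a size-extending write: update the in-core and on-disk inode
 * sizes, drop the inode from the orphan list once the extension has been
 * safely recorded, and truncate away any blocks that were allocated beyond
 * what was actually written.
 */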
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
                                           ssize_t written, size_t count)
{
        handle_t *handle;
        bool truncate = false;
        u8 blkbits = inode->i_blkbits;
        ext4_lblk_t written_blk, end_blk;
        int ret;

        /*
         * Note that EXT4_I(inode)->i_disksize can get extended up to
         * inode->i_size while the I/O was running due to writeback of delalloc
         * blocks. But, the code in ext4_iomap_alloc() is careful to use
         * zeroed/unwritten extents if this is possible; thus we won't leave
         * uninitialized blocks in a file even if we didn't succeed in writing
         * as much as we intended.
         */
        WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
        if (offset + count <= EXT4_I(inode)->i_disksize) {
                /*
                 * We need to ensure that the inode is removed from the orphan
                 * list if it has been added prematurely, due to writeback of
                 * delalloc blocks.
                 */
                if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
                        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

                        if (IS_ERR(handle)) {
                                ext4_orphan_del(NULL, inode);
                                return PTR_ERR(handle);
                        }

                        ext4_orphan_del(handle, inode);
                        ext4_journal_stop(handle);
                }

                return written;
        }

        if (written < 0)
                goto truncate;

        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
        if (IS_ERR(handle)) {
                written = PTR_ERR(handle);
                goto truncate;
        }

        if (ext4_update_inode_size(inode, offset + written)) {
                ret = ext4_mark_inode_dirty(handle, inode);
                if (unlikely(ret)) {
                        written = ret;
                        ext4_journal_stop(handle);
                        goto truncate;
                }
        }

        /*
         * We may need to truncate allocated but not written blocks beyond EOF.
         */
        written_blk = ALIGN(offset + written, 1 << blkbits);
        end_blk = ALIGN(offset + count, 1 << blkbits);
        if (written_blk < end_blk && ext4_can_truncate(inode))
                truncate = true;

        /*
         * Remove the inode from the orphan list if it has been extended and
         * everything went OK.
         */
        if (!truncate && inode->i_nlink)
                ext4_orphan_del(handle, inode);
        ext4_journal_stop(handle);

        if (truncate) {
truncate:
                ext4_truncate_failed_write(inode);
                /*
                 * If the truncate operation failed early, then the inode may
                 * still be on the orphan list. In that case, we need to try to
                 * remove the inode from the in-memory linked list.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return written;
}

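/*
 * Direct I/O write completion: convert any unwritten extents covering the
 * range that was actually written, so the new data becomes visible.
 */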
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
                                 int error, unsigned int flags)
{
        loff_t offset = iocb->ki_pos;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (error)
                return error;

        if (size && flags & IOMAP_DIO_UNWRITTEN)
                return ext4_convert_unwritten_extents(NULL, inode,
                                                      offset, size);

        return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
        .end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with the shared lock acquired, then check
 * whether any condition requires an exclusive inode lock. If so, we restart
 * the whole operation by releasing the shared lock and acquiring the
 * exclusive lock instead.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since they
 *   require updating inode i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - In practice, shared locking is therefore retained only for overwrites.
 *   In all other cases we switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
                                     bool *ilock_shared, bool *extend)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        loff_t offset;
        size_t count;
        ssize_t ret;

restart:
        ret = ext4_generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = ret;
        if (ext4_extending_io(inode, offset, count))
                *extend = true;
        /*
         * Determine whether the IO operation will overwrite allocated
         * and initialized blocks.
         * We need exclusive i_rwsem for changing security info
         * in file_modified().
         */
        if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
             !ext4_overwrite_io(inode, offset, count))) {
                inode_unlock_shared(inode);
                *ilock_shared = false;
                inode_lock(inode);
                goto restart;
        }

        ret = file_modified(file);
        if (ret < 0)
                goto out;

        return count;
out:
        if (*ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);
        return ret;
}

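/*
 * Direct I/O write path, implementing the locking strategy described above
 * ext4_dio_write_checks(). Size-extending writes are protected by a
 * temporary orphan list entry, and any tail of the request that direct I/O
 * could not complete is finished through buffered I/O.
 */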
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        handle_t *handle;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;
        size_t count = iov_iter_count(from);
        const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
        bool extend = false, unaligned_io = false;
        bool ilock_shared = true;

        /*
         * We initially start with the shared inode lock, unless it is
         * unaligned IO, which needs the exclusive lock anyway.
         */
        if (ext4_unaligned_io(inode, from, offset)) {
                unaligned_io = true;
                ilock_shared = false;
        }
        /*
         * Quick check here without any i_rwsem lock to see if it is extending
         * IO. A more reliable check is done in ext4_dio_write_checks() with
         * proper locking in place.
         */
        if (offset + count > i_size_read(inode))
                ilock_shared = false;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (ilock_shared) {
                        if (!inode_trylock_shared(inode))
                                return -EAGAIN;
                } else {
                        if (!inode_trylock(inode))
                                return -EAGAIN;
                }
        } else {
                if (ilock_shared)
                        inode_lock_shared(inode);
                else
                        inode_lock(inode);
        }

        /* Fall back to buffered I/O if the inode does not support direct I/O. */
        if (!ext4_dio_supported(inode)) {
                if (ilock_shared)
                        inode_unlock_shared(inode);
                else
                        inode_unlock(inode);
                return ext4_buffered_write_iter(iocb, from);
        }

        ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
        if (ret <= 0)
                return ret;

        /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
        if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
                ret = -EAGAIN;
                goto out;
        }

        offset = iocb->ki_pos;
        count = ret;

        /*
         * Unaligned direct IOs must be serialized against each other, as
         * zeroing of partial blocks by two competing unaligned IOs can
         * result in data corruption.
         *
         * So we make sure no unaligned IO is in flight before proceeding.
         * For IOs where we need not wait (like unaligned non-AIO DIO),
         * the inode_dio_wait() below may become a no-op anyway, since we
         * start with the exclusive lock.
         */
        if (unaligned_io)
                inode_dio_wait(inode);

        if (extend) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                ext4_journal_stop(handle);
        }

        if (ilock_shared)
                iomap_ops = &ext4_iomap_overwrite_ops;
        ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                           is_sync_kiocb(iocb) || unaligned_io || extend);

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
        if (ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);

        if (ret >= 0 && iov_iter_count(from)) {
                ssize_t err;
                loff_t endbyte;

                offset = iocb->ki_pos;
                err = ext4_buffered_write_iter(iocb, from);
                if (err < 0)
                        return err;

                /*
                 * We need to ensure that the pages within the page cache for
                 * the range covered by this I/O are written to disk and
                 * invalidated. This is an attempt to preserve the expected
                 * direct I/O semantics in the case we fall back to buffered
                 * I/O to complete the remainder of the I/O request.
                 */
                ret += err;
                endbyte = offset + err - 1;
                err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
                                                   offset, endbyte);
                if (!err)
                        invalidate_mapping_pages(iocb->ki_filp->f_mapping,
                                                 offset >> PAGE_SHIFT,
                                                 endbyte >> PAGE_SHIFT);
        }

        return ret;
}

#ifdef CONFIG_FS_DAX
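/*
 * DAX write path: always takes the exclusive inode lock, adds the inode to
 * the orphan list ahead of a size-extending write, and performs the write
 * through dax_iomap_rw().
 */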
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        size_t count;
        loff_t offset;
        handle_t *handle;
        bool extend = false;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = iov_iter_count(from);

        if (offset + count > EXT4_I(inode)->i_disksize) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                extend = true;
                ext4_journal_stop(handle);
        }

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

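/*
 * Top-level write_iter: dispatches to the DAX, direct I/O, or buffered
 * write path depending on the inode and the iocb flags.
 */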
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_write_iter(iocb, from);

        return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

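/*
 * Choose the vm_operations for a new mapping: DAX inodes get
 * ext4_dax_vm_ops (with huge page faults enabled), everything else uses
 * the regular page-cache ops.
 */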
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files, nor
         * for DAX files whose underlying dax_device is not synchronous.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

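/*
 * On first open after mount, record where the filesystem is mounted in
 * the superblock's s_last_mounted field.
 */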
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

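/*
 * Called on each open of a regular file: samples the mount point, runs
 * the fscrypt and fsverity open checks, attaches the jbd2 inode for
 * writers, and sets FMODE_NOWAIT before the quota open handling.
 */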
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        ret = fsverity_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present.
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset,
                                         &ext4_iomap_report_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset,
                                         &ext4_iomap_report_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .iopoll         = iomap_dio_iopoll,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};