// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

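/*
 * Direct I/O is not supported for encrypted or verity files, for inodes
 * journaling data, or for inodes with inline data; those cases fall back
 * to buffered I/O.
 */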
static bool ext4_dio_supported(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
                return false;
        if (fsverity_active(inode))
                return false;
        if (ext4_should_journal_data(inode))
                return false;
        if (ext4_has_inline_data(inode))
                return false;
        return true;
}

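/*
 * Perform a direct I/O read, taking the inode lock shared (trylocking it
 * for IOCB_NOWAIT). Inodes that cannot do direct I/O are handed to
 * generic_file_read_iter() with IOCB_DIRECT cleared.
 */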
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }

        if (!ext4_dio_supported(inode)) {
                inode_unlock_shared(inode);
                /*
                 * Fall back to buffered I/O if the operation being performed
                 * on the inode is not supported by direct I/O. The IOCB_DIRECT
                 * flag needs to be cleared here in order to ensure that the
                 * direct I/O path within generic_file_read_iter() is not
                 * taken.
                 */
                iocb->ki_flags &= ~IOCB_DIRECT;
                return generic_file_read_iter(iocb, to);
        }

        ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
                           is_sync_kiocb(iocb));
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it cannot
         * change anymore.
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered I/O in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

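/*
 * Top-level read entry point: dispatch to the DAX, direct I/O, or buffered
 * read path as appropriate for the inode and the iocb flags.
 */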
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_read_iter(iocb, to);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_read_iter(iocb, to);

        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

/*
 * This tests whether the I/O in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct I/O, and they
 * are converted to written only after the I/O is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        unsigned long blockmask = sb->s_blocksize - 1;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return true;

        return false;
}
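
/*
 * A worked example (assuming a 4096-byte block size, so blockmask ==
 * 0xfff): a write of 8192 bytes at pos 4096 touches only whole blocks
 * and is aligned, whereas a write of 512 bytes at pos 4096, or of 4096
 * bytes at pos 512, has (pos | alignment) & 0xfff != 0 and must take
 * the exclusive lock. Note that iov_iter_alignment() folds in both the
 * segment lengths and the user buffer addresses, so a misaligned user
 * buffer also makes the I/O unaligned.
 */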

static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
        if (offset + len > i_size_read(inode) ||
            offset + len > EXT4_I(inode)->i_disksize)
                return true;
        return false;
}

/* Is I/O overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }

        return iov_iter_count(from);
}

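/*
 * As ext4_generic_write_checks(), but additionally updates the file's
 * timestamps and strips setuid/setgid bits via file_modified().
 */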
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret, count;

        count = ext4_generic_write_checks(iocb, from);
        if (count <= 0)
                return count;

        ret = file_modified(iocb->ki_filp);
        if (ret)
                return ret;
        return count;
}

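/*
 * Buffered write path: takes the inode lock exclusively, performs the
 * write through the page cache, then handles any O_(D)SYNC requirement
 * via generic_write_sync(). IOCB_NOWAIT is not supported here.
 */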
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
                                        struct iov_iter *from)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT)
                return -EOPNOTSUPP;

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        current->backing_dev_info = inode_to_bdi(inode);
        ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
        current->backing_dev_info = NULL;

out:
        inode_unlock(inode);
        if (likely(ret > 0)) {
                iocb->ki_pos += ret;
                ret = generic_write_sync(iocb, ret);
        }

        return ret;
}

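/*
 * Finish off a size-extending write: update i_size/i_disksize to reflect
 * what was actually written, drop the inode from the orphan list now that
 * the extension is durable, and truncate away any blocks that were
 * allocated beyond the data we managed to write.
 */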
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
                                           ssize_t written, size_t count)
{
        handle_t *handle;
        bool truncate = false;
        u8 blkbits = inode->i_blkbits;
        ext4_lblk_t written_blk, end_blk;

        /*
         * Note that EXT4_I(inode)->i_disksize can get extended up to
         * inode->i_size while the I/O was running due to writeback of delalloc
         * blocks. But, the code in ext4_iomap_alloc() is careful to use
         * zeroed/unwritten extents if this is possible; thus we won't leave
         * uninitialized blocks in a file even if we didn't succeed in writing
         * as much as we intended.
         */
        WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
        if (offset + count <= EXT4_I(inode)->i_disksize) {
                /*
                 * We need to ensure that the inode is removed from the orphan
                 * list if it has been added prematurely, due to writeback of
                 * delalloc blocks.
                 */
                if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
                        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

                        if (IS_ERR(handle)) {
                                ext4_orphan_del(NULL, inode);
                                return PTR_ERR(handle);
                        }

                        ext4_orphan_del(handle, inode);
                        ext4_journal_stop(handle);
                }

                return written;
        }

        if (written < 0)
                goto truncate;

        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
        if (IS_ERR(handle)) {
                written = PTR_ERR(handle);
                goto truncate;
        }

        if (ext4_update_inode_size(inode, offset + written))
                ext4_mark_inode_dirty(handle, inode);

        /*
         * We may need to truncate allocated but not written blocks beyond EOF.
         */
        written_blk = ALIGN(offset + written, 1 << blkbits);
        end_blk = ALIGN(offset + count, 1 << blkbits);
        if (written_blk < end_blk && ext4_can_truncate(inode))
                truncate = true;

        /*
         * Remove the inode from the orphan list if it has been extended and
         * everything went OK.
         */
        if (!truncate && inode->i_nlink)
                ext4_orphan_del(handle, inode);
        ext4_journal_stop(handle);

        if (truncate) {
truncate:
                ext4_truncate_failed_write(inode);
                /*
                 * If the truncate operation failed early, then the inode may
                 * still be on the orphan list. In that case, we need to try to
                 * remove the inode from the in-memory linked list.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return written;
}

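/*
 * Direct I/O write completion handler: convert any unwritten extents
 * covering the range that was just written into written extents.
 */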
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
                                 int error, unsigned int flags)
{
        loff_t offset = iocb->ki_pos;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (error)
                return error;

        if (size && flags & IOMAP_DIO_UNWRITTEN)
                return ext4_convert_unwritten_extents(NULL, inode,
                                                      offset, size);

        return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
        .end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with the shared lock acquired, then see if
 * any condition requires an exclusive inode lock. If so, we restart the
 * whole operation by releasing the shared lock and acquiring the exclusive
 * lock. Note that this function is entered with the inode lock already held
 * (shared or exclusive, as indicated by *ilock_shared).
 *
 * - For unaligned I/O we never take the shared lock, as it may cause data
 *   corruption when two unaligned I/Os try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since they
 *   require updating the inode's i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - In practice, only overwrites proceed under the shared lock; everything
 *   else switches to the exclusive i_rwsem.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
                                     bool *ilock_shared, bool *extend)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        loff_t offset;
        size_t count;
        ssize_t ret;

restart:
        ret = ext4_generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = ret;
        if (ext4_extending_io(inode, offset, count))
                *extend = true;
        /*
         * Determine whether the I/O operation will overwrite allocated
         * and initialized blocks.
         * We need exclusive i_rwsem for changing security info
         * in file_modified().
         */
        if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
             !ext4_overwrite_io(inode, offset, count))) {
                inode_unlock_shared(inode);
                *ilock_shared = false;
                inode_lock(inode);
                goto restart;
        }

        ret = file_modified(file);
        if (ret < 0)
                goto out;

        return count;
out:
        if (*ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);
        return ret;
}

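/*
 * Direct I/O write path. Chooses shared vs. exclusive inode locking (see
 * the comment above ext4_dio_write_checks()), adds the inode to the orphan
 * list before a size-extending write so that a crash mid-I/O cannot leave
 * allocated blocks dangling past i_size, and falls back to buffered I/O
 * either when the inode cannot do direct I/O at all or to complete the
 * tail of a partial direct write.
 */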
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        handle_t *handle;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;
        size_t count = iov_iter_count(from);
        const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
        bool extend = false, unaligned_io = false;
        bool ilock_shared = true;

        /*
         * We initially start with the shared inode lock unless it is
         * unaligned I/O, which needs the exclusive lock anyway.
         */
        if (ext4_unaligned_io(inode, from, offset)) {
                unaligned_io = true;
                ilock_shared = false;
        }
        /*
         * Quick check here without any i_rwsem lock to see if it is extending
         * I/O. A more reliable check is done in ext4_dio_write_checks() with
         * proper locking in place.
         */
        if (offset + count > i_size_read(inode))
                ilock_shared = false;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (ilock_shared) {
                        if (!inode_trylock_shared(inode))
                                return -EAGAIN;
                } else {
                        if (!inode_trylock(inode))
                                return -EAGAIN;
                }
        } else {
                if (ilock_shared)
                        inode_lock_shared(inode);
                else
                        inode_lock(inode);
        }

        /* Fall back to buffered I/O if the inode does not support direct I/O. */
        if (!ext4_dio_supported(inode)) {
                if (ilock_shared)
                        inode_unlock_shared(inode);
                else
                        inode_unlock(inode);
                return ext4_buffered_write_iter(iocb, from);
        }

        ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
        if (ret <= 0)
                return ret;

        offset = iocb->ki_pos;
        count = ret;

        /*
         * Unaligned direct I/O must be serialized among each other as zeroing
         * of partial blocks of two competing unaligned I/Os can result in data
         * corruption.
         *
         * So we make sure we don't allow any unaligned I/O in flight.
         * For I/Os where we need not wait (like unaligned non-AIO DIO),
         * the inode_dio_wait() below may anyway become a no-op, since we
         * start with the exclusive lock.
         */
        if (unaligned_io)
                inode_dio_wait(inode);

        if (extend) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                ext4_journal_stop(handle);
        }

        if (ilock_shared)
                iomap_ops = &ext4_iomap_overwrite_ops;
        ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                           is_sync_kiocb(iocb) || unaligned_io || extend);

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
        if (ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);

        if (ret >= 0 && iov_iter_count(from)) {
                ssize_t err;
                loff_t endbyte;

                offset = iocb->ki_pos;
                err = ext4_buffered_write_iter(iocb, from);
                if (err < 0)
                        return err;

                /*
                 * We need to ensure that the pages within the page cache for
                 * the range covered by this I/O are written to disk and
                 * invalidated. This is in an attempt to preserve the expected
                 * direct I/O semantics in the case we fall back to buffered
                 * I/O to complete the rest of the I/O request.
                 */
                ret += err;
                endbyte = offset + err - 1;
                err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
                                                   offset, endbyte);
                if (!err)
                        invalidate_mapping_pages(iocb->ki_filp->f_mapping,
                                                 offset >> PAGE_SHIFT,
                                                 endbyte >> PAGE_SHIFT);
        }

        return ret;
}

#ifdef CONFIG_FS_DAX
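/*
 * DAX write path. Always takes the exclusive inode lock; for writes past
 * i_disksize the inode is put on the orphan list first, and the extension
 * is finished off in ext4_handle_inode_extension() afterwards.
 */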
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        size_t count;
        loff_t offset;
        handle_t *handle;
        bool extend = false;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = iov_iter_count(from);

        if (offset + count > EXT4_I(inode)->i_disksize) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                extend = true;
                ext4_journal_stop(handle);
        }

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

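/*
 * Top-level write entry point: dispatch to the DAX, direct I/O, or
 * buffered write path as appropriate for the inode and the iocb flags.
 */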
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_write_iter(iocb, from);

        return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
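/*
 * DAX page-fault handler for all page-entry sizes. Write faults to shared
 * mappings run under a journal handle so that block allocation is logged
 * and can be retried on ENOSPC; read faults and COW faults must not touch
 * the journal (see the comment below).
 */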
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

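/*
 * mmap entry point: reject MAP_SYNC mappings that the backing device
 * cannot honour, then install the DAX or regular vm_operations as
 * appropriate.
 */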
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files, nor
         * for DAX files if the underlying dax_device is not synchronous.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

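/*
 * Record the mount point in the superblock's s_last_mounted field the
 * first time the filesystem sees a file open after being mounted.
 */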
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

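/*
 * Per-open setup: sample the mount point, perform the fscrypt/fsverity
 * open checks, attach the jbd2 inode for writers, and advertise
 * FMODE_NOWAIT support before handing off to the quota code.
 */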
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        ret = fsverity_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present.
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset,
                                         &ext4_iomap_report_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset,
                                         &ext4_iomap_report_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};