linux/fs/ext4/file.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

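/*
 * Returns true if the inode can do iomap-based direct I/O. Encrypted,
 * verity, data=journal and inline-data inodes all fall back to buffered
 * I/O instead.
 */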
static bool ext4_dio_supported(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
                return false;
        if (fsverity_active(inode))
                return false;
        if (ext4_should_journal_data(inode))
                return false;
        if (ext4_has_inline_data(inode))
                return false;
        return true;
}

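/*
 * Direct I/O read path. Takes the shared inode lock (trylock only for
 * IOCB_NOWAIT), falls back to buffered I/O when direct I/O is not
 * supported on this inode, and otherwise hands the read to iomap_dio_rw().
 */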
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }

        if (!ext4_dio_supported(inode)) {
                inode_unlock_shared(inode);
                /*
                 * Fall back to buffered I/O if the operation being performed
                 * on the inode is not supported by direct I/O. The IOCB_DIRECT
                 * flag must be cleared here to ensure that the direct I/O
                 * path within generic_file_read_iter() is not taken.
                 */
                iocb->ki_flags &= ~IOCB_DIRECT;
                return generic_file_read_iter(iocb, to);
        }

        ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
                           is_sync_kiocb(iocb));
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}

#ifdef CONFIG_FS_DAX
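/*
 * DAX read path: like the direct I/O read path, but the I/O itself is
 * performed against the DAX device via dax_iomap_rw().
 */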
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered I/O in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

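/*
 * Top-level ->read_iter() handler: dispatch to the DAX, direct I/O or
 * buffered read path as appropriate for this inode and iocb.
 */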
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_read_iter(iocb, to);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_read_iter(iocb, to);

        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode, 0);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        unsigned long blockmask = sb->s_blocksize - 1;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return true;

        return false;
}

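/* Does this IO extend the file past i_size or i_disksize? */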
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
        if (offset + len > i_size_read(inode) ||
            offset + len > EXT4_I(inode)->i_disksize)
                return true;
        return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been
         * preallocated, regardless of whether they have been initialized
         * or not. To exclude unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

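/*
 * Common write checks: reject writes to immutable inodes, run the VFS
 * generic_write_checks(), and cap writes to bitmap-mapped (non-extent)
 * files at s_bitmap_maxbytes. Returns the number of bytes that may be
 * written, or a negative error.
 */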
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }

        return iov_iter_count(from);
}

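/*
 * As ext4_generic_write_checks(), but additionally runs file_modified()
 * to strip setuid/setgid bits and update the file timestamps.
 */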
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret, count;

        count = ext4_generic_write_checks(iocb, from);
        if (count <= 0)
                return count;

        ret = file_modified(iocb->ki_filp);
        if (ret)
                return ret;
        return count;
}

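/*
 * Buffered write path. Writes through the page cache under the exclusive
 * inode lock; IOCB_NOWAIT is not supported here.
 */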
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
                                        struct iov_iter *from)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT)
                return -EOPNOTSUPP;

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        current->backing_dev_info = inode_to_bdi(inode);
        ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
        current->backing_dev_info = NULL;

out:
        inode_unlock(inode);
        if (likely(ret > 0)) {
                iocb->ki_pos += ret;
                ret = generic_write_sync(iocb, ret);
        }

        return ret;
}

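/*
 * Finish off a write that may have extended the file: update i_size and
 * i_disksize as needed, drop the inode from the orphan list once that is
 * safe, and truncate away blocks allocated beyond what was actually
 * written (e.g. after a short or failed write).
 */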
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
                                           ssize_t written, size_t count)
{
        handle_t *handle;
        bool truncate = false;
        u8 blkbits = inode->i_blkbits;
        ext4_lblk_t written_blk, end_blk;
        int ret;

        /*
         * Note that EXT4_I(inode)->i_disksize can get extended up to
         * inode->i_size while the I/O was running due to writeback of delalloc
         * blocks. But, the code in ext4_iomap_alloc() is careful to use
         * zeroed/unwritten extents if this is possible; thus we won't leave
         * uninitialized blocks in a file even if we didn't succeed in writing
         * as much as we intended.
         */
        WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
        if (offset + count <= EXT4_I(inode)->i_disksize) {
                /*
                 * We need to ensure that the inode is removed from the orphan
                 * list if it has been added prematurely, due to writeback of
                 * delalloc blocks.
                 */
                if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
                        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

                        if (IS_ERR(handle)) {
                                ext4_orphan_del(NULL, inode);
                                return PTR_ERR(handle);
                        }

                        ext4_orphan_del(handle, inode);
                        ext4_journal_stop(handle);
                }

                return written;
        }

        if (written < 0)
                goto truncate;

        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
        if (IS_ERR(handle)) {
                written = PTR_ERR(handle);
                goto truncate;
        }

        if (ext4_update_inode_size(inode, offset + written)) {
                ret = ext4_mark_inode_dirty(handle, inode);
                if (unlikely(ret)) {
                        written = ret;
                        ext4_journal_stop(handle);
                        goto truncate;
                }
        }

        /*
         * We may need to truncate allocated but not written blocks beyond EOF.
         */
        written_blk = ALIGN(offset + written, 1 << blkbits);
        end_blk = ALIGN(offset + count, 1 << blkbits);
        if (written_blk < end_blk && ext4_can_truncate(inode))
                truncate = true;

        /*
         * Remove the inode from the orphan list if it has been extended and
         * everything went OK.
         */
        if (!truncate && inode->i_nlink)
                ext4_orphan_del(handle, inode);
        ext4_journal_stop(handle);

        if (truncate) {
truncate:
                ext4_truncate_failed_write(inode);
                /*
                 * If the truncate operation failed early, then the inode may
                 * still be on the orphan list. In that case, we need to try to
                 * remove the inode from the in-memory linked list.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return written;
}

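/*
 * Direct I/O write completion handler: converts any unwritten extents
 * covered by a successful write to written state.
 */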
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
                                 int error, unsigned int flags)
{
        loff_t offset = iocb->ki_pos;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (error)
                return error;

        if (size && flags & IOMAP_DIO_UNWRITTEN)
                return ext4_convert_unwritten_extents(NULL, inode,
                                                      offset, size);

        return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
        .end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with a shared lock acquired, then see if any
 * condition requires an exclusive inode lock. If so, we restart the whole
 * operation by releasing the shared lock and acquiring the exclusive lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since they
 *   require updating inode i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - Shared locking is thus mostly used for overwrites; otherwise we switch
 *   to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
                                     bool *ilock_shared, bool *extend)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        loff_t offset;
        size_t count;
        ssize_t ret;

restart:
        ret = ext4_generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = ret;
        if (ext4_extending_io(inode, offset, count))
                *extend = true;
        /*
         * Determine whether the IO operation will overwrite allocated
         * and initialized blocks.
         * We need exclusive i_rwsem for changing security info
         * in file_modified().
         */
        if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
             !ext4_overwrite_io(inode, offset, count))) {
                if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
                inode_unlock_shared(inode);
                *ilock_shared = false;
                inode_lock(inode);
                goto restart;
        }

        ret = file_modified(file);
        if (ret < 0)
                goto out;

        return count;
out:
        if (*ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);
        return ret;
}

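/*
 * Direct I/O write path, built on iomap_dio_rw(). See the comment above
 * ext4_dio_write_checks() for how the shared vs. exclusive inode lock is
 * chosen. Extending writes are protected against crashes via the orphan
 * list, and any remainder left over after a partial direct write is
 * completed through the buffered path.
 */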
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        handle_t *handle;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;
        size_t count = iov_iter_count(from);
        const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
        bool extend = false, unaligned_io = false;
        bool ilock_shared = true;

        /*
         * We initially start with a shared inode lock unless the IO is
         * unaligned, in which case we need the exclusive lock anyway.
         */
        if (ext4_unaligned_io(inode, from, offset)) {
                unaligned_io = true;
                ilock_shared = false;
        }
        /*
         * Quick check here without any i_rwsem lock to see if it is extending
         * IO. A more reliable check is done in ext4_dio_write_checks() with
         * proper locking in place.
         */
        if (offset + count > i_size_read(inode))
                ilock_shared = false;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (ilock_shared) {
                        if (!inode_trylock_shared(inode))
                                return -EAGAIN;
                } else {
                        if (!inode_trylock(inode))
                                return -EAGAIN;
                }
        } else {
                if (ilock_shared)
                        inode_lock_shared(inode);
                else
                        inode_lock(inode);
        }

        /* Fall back to buffered I/O if the inode does not support direct I/O. */
        if (!ext4_dio_supported(inode)) {
                if (ilock_shared)
                        inode_unlock_shared(inode);
                else
                        inode_unlock(inode);
                return ext4_buffered_write_iter(iocb, from);
        }

        ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
        if (ret <= 0)
                return ret;

        /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
        if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
                ret = -EAGAIN;
                goto out;
        }

        offset = iocb->ki_pos;
        count = ret;

        /*
         * Unaligned direct IOs must be serialized with respect to each
         * other, as zeroing of partial blocks by two competing unaligned
         * IOs can result in data corruption.
         *
         * So we make sure we don't allow any unaligned IO in flight.
         * For IOs where we need not wait (like unaligned non-AIO DIO),
         * the inode_dio_wait() below may become a no-op anyway, since we
         * start with an exclusive lock.
         */
        if (unaligned_io)
                inode_dio_wait(inode);

        if (extend) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                ext4_journal_stop(handle);
        }

        if (ilock_shared)
                iomap_ops = &ext4_iomap_overwrite_ops;
        ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                           is_sync_kiocb(iocb) || unaligned_io || extend);
        if (ret == -ENOTBLK)
                ret = 0;

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
        if (ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);

        if (ret >= 0 && iov_iter_count(from)) {
                ssize_t err;
                loff_t endbyte;

                offset = iocb->ki_pos;
                err = ext4_buffered_write_iter(iocb, from);
                if (err < 0)
                        return err;

                /*
                 * We need to ensure that the pages within the page cache for
                 * the range covered by this I/O are written to disk and
                 * invalidated. This is an attempt to preserve the expected
                 * direct I/O semantics in the case where we fall back to
                 * buffered I/O to complete the I/O request.
                 */
                ret += err;
                endbyte = offset + err - 1;
                err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
                                                   offset, endbyte);
                if (!err)
                        invalidate_mapping_pages(iocb->ki_filp->f_mapping,
                                                 offset >> PAGE_SHIFT,
                                                 endbyte >> PAGE_SHIFT);
        }

        return ret;
}

#ifdef CONFIG_FS_DAX
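/*
 * DAX write path: always runs under the exclusive inode lock and, like
 * the direct I/O path, uses the orphan list to protect writes extending
 * past i_disksize against crashes.
 */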
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        size_t count;
        loff_t offset;
        handle_t *handle;
        bool extend = false;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = iov_iter_count(from);

        if (offset + count > EXT4_I(inode)->i_disksize) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                extend = true;
                ext4_journal_stop(handle);
        }

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

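/*
 * Top-level ->write_iter() handler: dispatch to the DAX, direct I/O or
 * buffered write path as appropriate for this inode and iocb.
 */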
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_write_iter(iocb, from);

        return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
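/*
 * Page fault handler for DAX mappings, shared by all fault sizes. Write
 * faults run inside a journal transaction so that blocks can be
 * allocated, and are retried on ENOSPC.
 */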
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

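/*
 * ->mmap() handler: picks the DAX or regular page-cache vm_ops, and
 * rejects synchronous (MAP_SYNC) mappings that the inode or underlying
 * dax_device cannot support.
 */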
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files, nor for
         * DAX files if the underlying dax_device is not synchronous.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

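/*
 * Record where the filesystem has been mounted in the superblock's
 * s_last_mounted field. This is done at most once per mount and is
 * skipped entirely on read-only mounts.
 */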
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

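/*
 * ->open() handler: samples the mount point, runs the fscrypt and
 * fsverity open checks, attaches the jbd2 inode for writable opens, and
 * marks the file as supporting IOCB_NOWAIT.
 */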
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        ret = fsverity_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset,
                                         &ext4_iomap_report_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset,
                                         &ext4_iomap_report_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .iopoll         = iomap_dio_iopoll,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};