// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
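/*
 * DAX read path.  Take the shared inode lock, backing off with -EAGAIN
 * instead of blocking for IOCB_NOWAIT callers, then hand the request to
 * dax_iomap_rw().
 */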
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!inode_trylock_shared(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fallback to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

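/*
 * Top-level read entry point: bail out early on a forced shutdown, route
 * DAX inodes to ext4_dax_read_iter(), and use the generic page-cache read
 * path for everything else.
 */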
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

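/* Wait until all in-flight conversions of unwritten extents have completed. */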
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not.  To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

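/*
 * Checks common to all ext4 write paths: run generic_write_checks(),
 * refuse writes to immutable inodes, and cap the write at
 * s_bitmap_maxbytes for bitmap-format (non-extent) files.
 */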
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
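/*
 * DAX write path: take the exclusive inode lock (honouring IOCB_NOWAIT),
 * perform the usual write checks plus suid/time updates, then hand the
 * request to dax_iomap_rw().
 */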
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

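/*
 * Main write entry point.  Dispatches DAX writes, serializes unaligned
 * direct AIO, and marks overwrites of already-mapped blocks via
 * iocb->private so the direct IO path can avoid unnecessary locking.
 */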
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
                return -EOPNOTSUPP;

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO writes must be serialized with each other,
         * as zeroing of partial blocks by two competing unaligned AIOs can
         * result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        /*
         * Unaligned direct AIO must be the only IO in flight. Otherwise,
         * overlapping aligned IO issued after the unaligned one might
         * result in data corruption.
         */
        if (ret == -EIOCBQUEUED && unaligned_aio)
                ext4_unwritten_wait(inode);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
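/*
 * Fault handler for DAX mappings.  Write faults on shared mappings may
 * allocate blocks, so they run under a journal handle and are retried on
 * ENOSPC; all faults take i_mmap_sem to protect against truncate.
 */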
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault() will handle splitting / fallback so
         * that we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

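/*
 * Pick the vm_operations for a new mapping: DAX inodes get the DAX fault
 * handlers (and VM_HUGEPAGE), everything else the page-cache ops.  MAP_SYNC
 * mappings are refused unless the dax_device supports them.
 */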
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files, nor
         * for DAX files if the underlying dax_device is not synchronous.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

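/*
 * Record the mount point in the superblock's s_last_mounted field, at most
 * once per mount and never on a read-only filesystem.
 */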
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

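/*
 * Called at every open of a regular file: samples the mount point, runs the
 * fscrypt open checks, attaches the jbd2 inode for writers, and marks the
 * file as supporting RWF_NOWAIT.
 */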
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};