linux/fs/sync.c
// SPDX-License-Identifier: GPL-2.0
/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/backing-dev.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
                        SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work. For simple filesystems
 * writeback_inodes_sb(sb) just dirties buffers with inode data, so we have to
 * submit IO for these buffers via __sync_blockdev(). This also speeds up the
 * wait == 1 case since in that case write_inode() functions do
 * sync_dirty_buffer() and thus effectively write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
        if (wait)
                sync_inodes_sb(sb);
        else
                writeback_inodes_sb(sb, WB_REASON_SYNC);

        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, wait);
        return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock: filesystem data as well as the underlying block
 * device.  The caller must hold sb->s_umount.
 */
int sync_filesystem(struct super_block *sb)
{
        int ret;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        /*
         * No point in syncing out anything if the filesystem is read-only.
         */
        if (sb_rdonly(sb))
                return 0;

        ret = __sync_filesystem(sb, 0);
        if (ret < 0)
                return ret;
        return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL(sync_filesystem);

static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
        if (!sb_rdonly(sb))
                sync_inodes_sb(sb);
}

static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
        if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
            sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, *(int *)arg);
}

static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
{
        filemap_fdatawrite(bdev->bd_inode->i_mapping);
}

static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
{
        /*
         * We keep the error status of each individual mapping so that
         * applications can catch writeback errors using fsync(2).
         * See filemap_fdatawait_keep_errors() for details.
         */
        filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
}

/*
 * Sync everything. We start by waking flusher threads so that most of
 * writeback runs on all devices in parallel. Then we sync all inodes reliably
 * which effectively also waits for all flusher threads to finish doing
 * writeback. At this point all data is on disk so metadata should be stable
 * and we tell filesystems to sync their metadata via ->sync_fs() calls.
 * Finally, we write out all block devices because some filesystems (e.g. ext2)
 * just write metadata (such as inodes or bitmaps) to block device page cache
 * and do not sync it on their own in ->sync_fs().
 */
void ksys_sync(void)
{
        int nowait = 0, wait = 1;

        wakeup_flusher_threads(WB_REASON_SYNC);
        iterate_supers(sync_inodes_one_sb, NULL);
        iterate_supers(sync_fs_one_sb, &nowait);
        iterate_supers(sync_fs_one_sb, &wait);
        iterate_bdevs(fdatawrite_one_bdev, NULL);
        iterate_bdevs(fdatawait_one_bdev, NULL);
        if (unlikely(laptop_mode))
                laptop_sync_completion();
}

SYSCALL_DEFINE0(sync)
{
        ksys_sync();
        return 0;
}
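
/*
 * Illustrative userspace sketch (not part of this kernel source, assumes the
 * glibc sync() wrapper from <unistd.h>): sync(2) takes no arguments and
 * always returns success, so a caller simply invokes it and lets the kernel
 * run the sequence implemented in ksys_sync() above.
 *
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              sync();         // flush dirty data on all filesystems
 *              return 0;
 *      }
 */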

static void do_sync_work(struct work_struct *work)
{
        int nowait = 0;

        /*
         * Sync twice to reduce the possibility we skipped some inodes / pages
         * because they were temporarily locked
         */
        iterate_supers(sync_inodes_one_sb, &nowait);
        iterate_supers(sync_fs_one_sb, &nowait);
        iterate_bdevs(fdatawrite_one_bdev, NULL);
        iterate_supers(sync_inodes_one_sb, &nowait);
        iterate_supers(sync_fs_one_sb, &nowait);
        iterate_bdevs(fdatawrite_one_bdev, NULL);
        printk("Emergency Sync complete\n");
        kfree(work);
}

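/*
 * emergency_sync() may be called from atomic context (for example from the
 * SysRq 's' handler), so it cannot block: the work item is allocated with
 * GFP_ATOMIC and the actual syncing is deferred to a workqueue via
 * do_sync_work() above.
 */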
void emergency_sync(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_sync_work);
                schedule_work(work);
        }
}

/*
 * Sync a single superblock.
 */
SYSCALL_DEFINE1(syncfs, int, fd)
{
        struct fd f = fdget(fd);
        struct super_block *sb;
        int ret;

        if (!f.file)
                return -EBADF;
        sb = f.file->f_path.dentry->d_sb;

        down_read(&sb->s_umount);
        ret = sync_filesystem(sb);
        up_read(&sb->s_umount);

        fdput(f);
        return ret;
}
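
/*
 * Illustrative userspace sketch (not part of this kernel source): any open
 * file descriptor on the target filesystem identifies the superblock to be
 * synced.  The path "/mnt/data" is made up for the example; the glibc
 * syncfs() wrapper needs _GNU_SOURCE and <unistd.h>.
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int fd = open("/mnt/data", O_RDONLY | O_DIRECTORY);
 *
 *              if (fd < 0 || syncfs(fd) != 0)
 *                      perror("syncfs /mnt/data");
 *              if (fd >= 0)
 *                      close(fd);
 *              return 0;
 *      }
 */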

/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file:               file to sync
 * @start:              offset in bytes of the beginning of data range to sync
 * @end:                offset in bytes of the end of data range (inclusive)
 * @datasync:           perform only datasync
 *
 * Write back data in range @start..@end and metadata for @file to disk.  If
 * @datasync is set, only the metadata needed to access modified file data is
 * written.
 */
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;

        if (!file->f_op->fsync)
                return -EINVAL;
        if (!datasync && (inode->i_state & I_DIRTY_TIME))
                mark_inode_dirty_sync(inode);
        return file->f_op->fsync(file, start, end, datasync);
}
EXPORT_SYMBOL(vfs_fsync_range);

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:               file to sync
 * @datasync:           only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set, only the metadata needed to access modified file data is written.
 */
int vfs_fsync(struct file *file, int datasync)
{
        return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);
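
/*
 * Illustrative in-kernel sketch (not part of this file): a driver that keeps
 * data in a backing struct file, as loop-like drivers do, might flush it with
 * vfs_fsync().  flush_backing_file() is a hypothetical helper used only for
 * illustration.
 *
 *      static int flush_backing_file(struct file *filp)
 *      {
 *              int ret = vfs_fsync(filp, 0);
 *
 *              if (ret)
 *                      pr_warn("flushing backing file failed: %d\n", ret);
 *              return ret;
 *      }
 */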

static int do_fsync(unsigned int fd, int datasync)
{
        struct fd f = fdget(fd);
        int ret = -EBADF;

        if (f.file) {
                ret = vfs_fsync(f.file, datasync);
                fdput(f);
        }
        return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
        return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
        return do_fsync(fd, 1);
}
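
/*
 * Illustrative userspace sketch (not part of this kernel source): the only
 * difference between the two syscalls above is the @datasync value passed to
 * vfs_fsync().  append_record() and its parameters are hypothetical.
 *
 *      #include <unistd.h>
 *
 *      // Append a record and make the data itself durable.  fdatasync() may
 *      // skip metadata-only updates such as st_mtime, while fsync() would
 *      // flush those as well.
 *      static int append_record(int fd, const void *buf, size_t len)
 *      {
 *              if (write(fd, buf, len) != (ssize_t)len)
 *                      return -1;
 *              return fdatasync(fd);
 *      }
 */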

int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
                    unsigned int flags)
{
        int ret;
        struct address_space *mapping;
        loff_t endbyte;                 /* inclusive */
        umode_t i_mode;

        ret = -EINVAL;
        if (flags & ~VALID_FLAGS)
                goto out;

        endbyte = offset + nbytes;

        if ((s64)offset < 0)
                goto out;
        if ((s64)endbyte < 0)
                goto out;
        if (endbyte < offset)
                goto out;

        if (sizeof(pgoff_t) == 4) {
                if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
                        /*
                         * The range starts outside a 32 bit machine's
                         * pagecache addressing capabilities.  Let it "succeed"
                         */
                        ret = 0;
                        goto out;
                }
                if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
                        /*
                         * Out to EOF
                         */
                        nbytes = 0;
                }
        }

        if (nbytes == 0)
                endbyte = LLONG_MAX;
        else
                endbyte--;              /* inclusive */

        i_mode = file_inode(file)->i_mode;
        ret = -ESPIPE;
        if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
                        !S_ISLNK(i_mode))
                goto out;

        mapping = file->f_mapping;
        ret = 0;
        if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
                ret = file_fdatawait_range(file, offset, endbyte);
                if (ret < 0)
                        goto out;
        }

        if (flags & SYNC_FILE_RANGE_WRITE) {
                ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
                                                 WB_SYNC_NONE);
                if (ret < 0)
                        goto out;
        }

        if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
                ret = file_fdatawait_range(file, offset, endbyte);

out:
        return ret;
}

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback. Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * sys_sync_file_range() are committed to disk.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
                         unsigned int flags)
{
        int ret;
        struct fd f;

        ret = -EBADF;
        f = fdget(fd);
        if (f.file)
                ret = sync_file_range(f.file, offset, nbytes, flags);

        fdput(f);
        return ret;
}

SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
                                unsigned int, flags)
{
        return ksys_sync_file_range(fd, offset, nbytes, flags);
}

/* It would be nice if people remembered that not all the world's an i386
   when they introduce new system calls */
SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
                                 loff_t, offset, loff_t, nbytes)
{
        return ksys_sync_file_range(fd, offset, nbytes, flags);
}
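
/*
 * Illustrative userspace sketch (not part of this kernel source): one pattern
 * from the comment above ksys_sync_file_range() is to start writeout of a
 * just-written region asynchronously and wait for it on a later pass.  The
 * helper names start_flush() and finish_flush() are made up for the example;
 * the glibc sync_file_range() wrapper needs _GNU_SOURCE and <fcntl.h>.  As
 * the comment notes, file metadata is not written out by these calls.
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *
 *      // Kick off asynchronous writeback of dirty pages in [off, off + len).
 *      static int start_flush(int fd, off_t off, off_t len)
 *      {
 *              return sync_file_range(fd, off, len, SYNC_FILE_RANGE_WRITE);
 *      }
 *
 *      // Later: wait for that writeback and collect any I/O error.
 *      static int finish_flush(int fd, off_t off, off_t len)
 *      {
 *              return sync_file_range(fd, off, len,
 *                                     SYNC_FILE_RANGE_WAIT_BEFORE |
 *                                     SYNC_FILE_RANGE_WRITE |
 *                                     SYNC_FILE_RANGE_WAIT_AFTER);
 *      }
 */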