linux/fs/f2fs/file.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

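/*
 * Read fault handler: delegate to the generic filemap_fault() and, when
 * it succeeds, account one block of mapped read I/O for iostat.
 */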
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	ret = filemap_fault(vmf);
	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

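/*
 * Write fault handler: make the faulted page writable.  Inline data is
 * converted first; a new block is reserved unless the page belongs to a
 * compressed cluster, and the part of the last page beyond EOF is
 * zeroed before the page is dirtied.
 */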
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* this should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/* check to see if the page is mapped already (no holes) */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

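/*
 * Fetch the parent inode number of @inode from a live dentry alias.
 * Returns 1 and stores it in *pino on success, 0 if no alias is found.
 */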
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

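/*
 * Decide whether fsync() on this inode can rely on roll-forward
 * recovery alone or must trigger a full checkpoint; returns the
 * checkpoint reason, or CP_NO_NEEDED when the node-chain fast path is
 * sufficient.
 */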
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need to check whether any inode updates are pending */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

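/*
 * If the stored parent inode number is stale and the inode has exactly
 * one link, re-resolve the parent from a dentry alias and record it
 * under fi->i_sem.
 */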
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

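/*
 * Core fsync path: write back dirty data pages, then either issue a
 * full checkpoint (when roll-forward recovery cannot cover this inode)
 * or persist the inode's node chain together with recovery info, and
 * finish with a cache flush unless the mount disables barriers.
 */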
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	} else {
		/*
		 * In the OPU case, during fsync(), a node can be persisted
		 * before its data when the lower device doesn't support write
		 * barriers, resulting in data corruption after SPO.
		 * So for strict fsync mode, force atomic write semantics to
		 * keep the write order between data/node and the last node,
		 * to avoid potential data corruption.
		 */
		if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
			atomic = true;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() must be recoverable after a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error is set, avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, write ordering is already guaranteed, so
	 * we don't need to wait for node write completion here: the node
	 * chain serializes node blocks. If any node write is reordered, we
	 * will simply see a broken chain and stop roll-forward recovery
	 * there. That means we recover all or none of the node blocks up to
	 * a given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

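/*
 * Test whether the block address at @index terminates a SEEK_DATA or
 * SEEK_HOLE scan: valid (or dirty NEW_ADDR) blocks count as data, and
 * unallocated blocks count as holes.
 */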
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

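/*
 * Back end for SEEK_DATA/SEEK_HOLE: walk the dnode blocks from @offset
 * and return the file position of the first matching data block or
 * hole, or -ENXIO if none exists below i_size.
 */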
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

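/*
 * Invalidate @count block addresses in the dnode starting at
 * dn->ofs_in_node: clear the raw addresses, free the blocks, keep
 * compressed-cluster block counts consistent, and shrink the extent
 * cache and the valid block count accordingly.
 */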
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with a cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddrs in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

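/*
 * Zero the tail of the page that straddles the new EOF.  With
 * @cache_only, only an up-to-date cached page is touched; otherwise the
 * page is read in, zeroed from @from onward, and marked dirty.
 */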
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

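/*
 * Truncate all block mappings at and beyond @from: drop inline data or
 * partial dnode entries first, then release the remaining node blocks,
 * and finally zero out the partial page at the new EOF.
 */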
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned
	 * truncation is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For a compressed file, direct writes are disallowed after its
	 * compressed blocks have been released, but they should be allowed
	 * again after the file is truncated to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write(), so
			 * that an inode carrying the inline flag never grows
			 * beyond the inline_data size.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(&init_user_ns, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};

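/*
 * Zero @len bytes at byte offset @start within data block @index by
 * grabbing (or allocating) the page and dirtying it.
 */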
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

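/*
 * Release every allocated block in [pg_start, pg_end), one dnode at a
 * time; missing dnodes are simply skipped.
 */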
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

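/*
 * Implement FALLOC_FL_PUNCH_HOLE: zero the partial first and last pages
 * and release the whole blocks in between, with the page cache
 * truncated under i_gc_rwsem and the invalidate lock.
 */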
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(inode->i_mapping);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			filemap_invalidate_unlock(inode->i_mapping);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

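/*
 * Collapse-range helper: record the block addresses of @len blocks
 * starting at @off into @blkaddr, detaching non-checkpointed blocks
 * from the dnode (marked in @do_replace) so they can be moved instead
 * of copied.
 */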
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

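/*
 * Move or copy @len blocks from @src in @src_inode to @dst in
 * @dst_inode: detached addresses recorded in @do_replace are re-linked
 * at the destination via f2fs_replace_block(), while checkpointed
 * blocks are copied page by page and released from the source.
 */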
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

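/*
 * Exchange up to @len blocks between @src in @src_inode and @dst in
 * @dst_inode, working in batches of four dnodes' worth of addresses;
 * on failure, re-attach any block addresses already detached from the
 * source.
 */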
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

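/*
 * Shift all blocks from @offset + @len down to @offset by exchanging
 * block addresses within the same inode, dropping the extent tree and
 * truncating the page cache first.
 */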
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(inode->i_mapping);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(inode->i_mapping);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	filemap_invalidate_unlock(inode->i_mapping);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

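/*
 * Zero-range helper: reserve any missing blocks in [start, end) within
 * one dnode, then convert already-written blocks back to NEW_ADDR so
 * the range reads as zeroes, and update the extent cache.
 */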
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

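/*
 * Implement FALLOC_FL_ZERO_RANGE: zero the partial first and last pages
 * via fill_zero() and run f2fs_do_zero_range() over the whole blocks in
 * between, growing i_size unless FALLOC_FL_KEEP_SIZE is set.
 */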
1451static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1452                                                                int mode)
1453{
1454        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1455        struct address_space *mapping = inode->i_mapping;
1456        pgoff_t index, pg_start, pg_end;
1457        loff_t new_size = i_size_read(inode);
1458        loff_t off_start, off_end;
1459        int ret = 0;
1460
1461        ret = inode_newsize_ok(inode, (len + offset));
1462        if (ret)
1463                return ret;
1464
1465        ret = f2fs_convert_inline_inode(inode);
1466        if (ret)
1467                return ret;
1468
1469        ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1470        if (ret)
1471                return ret;
1472
1473        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1474        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1475
1476        off_start = offset & (PAGE_SIZE - 1);
1477        off_end = (offset + len) & (PAGE_SIZE - 1);
1478
1479        if (pg_start == pg_end) {
1480                ret = fill_zero(inode, pg_start, off_start,
1481                                                off_end - off_start);
1482                if (ret)
1483                        return ret;
1484
1485                new_size = max_t(loff_t, new_size, offset + len);
1486        } else {
1487                if (off_start) {
1488                        ret = fill_zero(inode, pg_start++, off_start,
1489                                                PAGE_SIZE - off_start);
1490                        if (ret)
1491                                return ret;
1492
1493                        new_size = max_t(loff_t, new_size,
1494                                        (loff_t)pg_start << PAGE_SHIFT);
1495                }
1496
1497                for (index = pg_start; index < pg_end;) {
1498                        struct dnode_of_data dn;
1499                        unsigned int end_offset;
1500                        pgoff_t end;
1501
1502                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1503                        filemap_invalidate_lock(mapping);
1504
1505                        truncate_pagecache_range(inode,
1506                                (loff_t)index << PAGE_SHIFT,
1507                                ((loff_t)pg_end << PAGE_SHIFT) - 1);
1508
1509                        f2fs_lock_op(sbi);
1510
1511                        set_new_dnode(&dn, inode, NULL, NULL, 0);
1512                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1513                        if (ret) {
1514                                f2fs_unlock_op(sbi);
1515                                filemap_invalidate_unlock(mapping);
1516                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1517                                goto out;
1518                        }
1519
1520                        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1521                        end = min(pg_end, end_offset - dn.ofs_in_node + index);
1522
1523                        ret = f2fs_do_zero_range(&dn, index, end);
1524                        f2fs_put_dnode(&dn);
1525
1526                        f2fs_unlock_op(sbi);
1527                        filemap_invalidate_unlock(mapping);
1528                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1529
1530                        f2fs_balance_fs(sbi, dn.node_changed);
1531
1532                        if (ret)
1533                                goto out;
1534
1535                        index = end;
1536                        new_size = max_t(loff_t, new_size,
1537                                        (loff_t)index << PAGE_SHIFT);
1538                }
1539
1540                if (off_end) {
1541                        ret = fill_zero(inode, pg_end, 0, off_end);
1542                        if (ret)
1543                                goto out;
1544
1545                        new_size = max_t(loff_t, new_size, offset + len);
1546                }
1547        }
1548
1549out:
1550        if (new_size > i_size_read(inode)) {
1551                if (mode & FALLOC_FL_KEEP_SIZE)
1552                        file_set_keep_isize(inode);
1553                else
1554                        f2fs_i_size_write(inode, new_size);
1555        }
1556        return ret;
1557}
1558
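/*
 * Implement FALLOC_FL_INSERT_RANGE: after trimming blocks beyond i_size
 * and flushing dirty pages, shift every block from @offset onward
 * towards the end of file, working backwards in chunks via
 * __exchange_data_block(), then grow i_size by @len.
 */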
1559static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1560{
1561        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1562        struct address_space *mapping = inode->i_mapping;
1563        pgoff_t nr, pg_start, pg_end, delta, idx;
1564        loff_t new_size;
1565        int ret = 0;
1566
1567        new_size = i_size_read(inode) + len;
1568        ret = inode_newsize_ok(inode, new_size);
1569        if (ret)
1570                return ret;
1571
1572        if (offset >= i_size_read(inode))
1573                return -EINVAL;
1574
1575        /* the insert range should be aligned to the f2fs block size */
1576        if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1577                return -EINVAL;
1578
1579        ret = f2fs_convert_inline_inode(inode);
1580        if (ret)
1581                return ret;
1582
1583        f2fs_balance_fs(sbi, true);
1584
1585        filemap_invalidate_lock(mapping);
1586        ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1587        filemap_invalidate_unlock(mapping);
1588        if (ret)
1589                return ret;
1590
1591        /* write out all dirty pages from offset */
1592        ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1593        if (ret)
1594                return ret;
1595
1596        pg_start = offset >> PAGE_SHIFT;
1597        pg_end = (offset + len) >> PAGE_SHIFT;
1598        delta = pg_end - pg_start;
1599        idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1600
1601        /* avoid gc operation during block exchange */
1602        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1603        filemap_invalidate_lock(mapping);
1604        truncate_pagecache(inode, offset);
1605
1606        while (!ret && idx > pg_start) {
1607                nr = idx - pg_start;
1608                if (nr > delta)
1609                        nr = delta;
1610                idx -= nr;
1611
1612                f2fs_lock_op(sbi);
1613                f2fs_drop_extent_tree(inode);
1614
1615                ret = __exchange_data_block(inode, inode, idx,
1616                                        idx + delta, nr, false);
1617                f2fs_unlock_op(sbi);
1618        }
1619        filemap_invalidate_unlock(mapping);
1620        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1621
1622        /* write out all moved pages, if possible */
1623        filemap_invalidate_lock(mapping);
1624        filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1625        truncate_pagecache(inode, offset);
1626        filemap_invalidate_unlock(mapping);
1627
1628        if (!ret)
1629                f2fs_i_size_write(inode, new_size);
1630        return ret;
1631}
1632
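/*
 * Preallocate blocks for the default fallocate (extend) mode. Regular
 * files are mapped in one shot with F2FS_GET_BLOCK_PRE_AIO; pinned
 * files are allocated one section at a time from the dedicated
 * CURSEG_COLD_DATA_PINNED log, running foreground GC first when free
 * sections are scarce.
 */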
1633static int expand_inode_data(struct inode *inode, loff_t offset,
1634                                        loff_t len, int mode)
1635{
1636        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1637        struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1638                        .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1639                        .m_may_create = true };
1640        pgoff_t pg_start, pg_end;
1641        loff_t new_size = i_size_read(inode);
1642        loff_t off_end;
1643        block_t expanded = 0;
1644        int err;
1645
1646        err = inode_newsize_ok(inode, (len + offset));
1647        if (err)
1648                return err;
1649
1650        err = f2fs_convert_inline_inode(inode);
1651        if (err)
1652                return err;
1653
1654        f2fs_balance_fs(sbi, true);
1655
1656        pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1657        pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1658        off_end = (offset + len) & (PAGE_SIZE - 1);
1659
1660        map.m_lblk = pg_start;
1661        map.m_len = pg_end - pg_start;
1662        if (off_end)
1663                map.m_len++;
1664
1665        if (!map.m_len)
1666                return 0;
1667
1668        if (f2fs_is_pinned_file(inode)) {
1669                block_t sec_blks = BLKS_PER_SEC(sbi);
1670                block_t sec_len = roundup(map.m_len, sec_blks);
1671
1672                map.m_len = sec_blks;
1673next_alloc:
1674                if (has_not_enough_free_secs(sbi, 0,
1675                        GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1676                        down_write(&sbi->gc_lock);
1677                        err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1678                        if (err && err != -ENODATA && err != -EAGAIN)
1679                                goto out_err;
1680                }
1681
1682                down_write(&sbi->pin_sem);
1683
1684                f2fs_lock_op(sbi);
1685                f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1686                f2fs_unlock_op(sbi);
1687
1688                map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1689                err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1690
1691                up_write(&sbi->pin_sem);
1692
1693                expanded += map.m_len;
1694                sec_len -= map.m_len;
1695                map.m_lblk += map.m_len;
1696                if (!err && sec_len)
1697                        goto next_alloc;
1698
1699                map.m_len = expanded;
1700        } else {
1701                err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1702                expanded = map.m_len;
1703        }
1704out_err:
1705        if (err) {
1706                pgoff_t last_off;
1707
1708                if (!expanded)
1709                        return err;
1710
1711                last_off = pg_start + expanded - 1;
1712
1713                /* update new size to the failed position */
1714                new_size = (last_off == pg_end) ? offset + len :
1715                                        (loff_t)(last_off + 1) << PAGE_SHIFT;
1716        } else {
1717                new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1718        }
1719
1720        if (new_size > i_size_read(inode)) {
1721                if (mode & FALLOC_FL_KEEP_SIZE)
1722                        file_set_keep_isize(inode);
1723                else
1724                        f2fs_i_size_write(inode, new_size);
1725        }
1726
1727        return err;
1728}
1729
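/*
 * Dispatch the supported ->fallocate modes. Encrypted files cannot
 * collapse or insert ranges, and compressed files only support plain
 * preallocation, since the other modes would move or remap their
 * clusters.
 */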
1730static long f2fs_fallocate(struct file *file, int mode,
1731                                loff_t offset, loff_t len)
1732{
1733        struct inode *inode = file_inode(file);
1734        long ret = 0;
1735
1736        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1737                return -EIO;
1738        if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1739                return -ENOSPC;
1740        if (!f2fs_is_compress_backend_ready(inode))
1741                return -EOPNOTSUPP;
1742
1743        /* f2fs only supports ->fallocate for regular files */
1744        if (!S_ISREG(inode->i_mode))
1745                return -EINVAL;
1746
1747        if (IS_ENCRYPTED(inode) &&
1748                (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1749                return -EOPNOTSUPP;
1750
1751        if (f2fs_compressed_file(inode) &&
1752                (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1753                        FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1754                return -EOPNOTSUPP;
1755
1756        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1757                        FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1758                        FALLOC_FL_INSERT_RANGE))
1759                return -EOPNOTSUPP;
1760
1761        inode_lock(inode);
1762
1763        if (mode & FALLOC_FL_PUNCH_HOLE) {
1764                if (offset >= inode->i_size)
1765                        goto out;
1766
1767                ret = punch_hole(inode, offset, len);
1768        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1769                ret = f2fs_collapse_range(inode, offset, len);
1770        } else if (mode & FALLOC_FL_ZERO_RANGE) {
1771                ret = f2fs_zero_range(inode, offset, len, mode);
1772        } else if (mode & FALLOC_FL_INSERT_RANGE) {
1773                ret = f2fs_insert_range(inode, offset, len);
1774        } else {
1775                ret = expand_inode_data(inode, offset, len, mode);
1776        }
1777
1778        if (!ret) {
1779                inode->i_mtime = inode->i_ctime = current_time(inode);
1780                f2fs_mark_inode_dirty_sync(inode, false);
1781                f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1782        }
1783
1784out:
1785        inode_unlock(inode);
1786
1787        trace_f2fs_fallocate(inode, mode, offset, len, ret);
1788        return ret;
1789}
1790
1791static int f2fs_release_file(struct inode *inode, struct file *filp)
1792{
1793        /*
1794         * f2fs_release_file() is called on every close. So we should not
1795         * drop any in-memory pages due to a close issued by another process.
1796         */
1797        if (!(filp->f_mode & FMODE_WRITE) ||
1798                        atomic_read(&inode->i_writecount) != 1)
1799                return 0;
1800
1801        /* any remaining atomic pages should be discarded */
1802        if (f2fs_is_atomic_file(inode))
1803                f2fs_drop_inmem_pages(inode);
1804        if (f2fs_is_volatile_file(inode)) {
1805                set_inode_flag(inode, FI_DROP_CACHE);
1806                filemap_fdatawrite(inode->i_mapping);
1807                clear_inode_flag(inode, FI_DROP_CACHE);
1808                clear_inode_flag(inode, FI_VOLATILE_FILE);
1809                stat_dec_volatile_write(inode);
1810        }
1811        return 0;
1812}
1813
1814static int f2fs_file_flush(struct file *file, fl_owner_t id)
1815{
1816        struct inode *inode = file_inode(file);
1817
1818        /*
1819         * If the process doing a transaction crashes, we should roll back.
1820         * Otherwise, other readers/writers could see a corrupted database
1821         * until all the writers close their files. Since this must be done
1822         * before the file lock is dropped, it needs to happen in ->flush.
1823         */
1824        if (f2fs_is_atomic_file(inode) &&
1825                        F2FS_I(inode)->inmem_task == current)
1826                f2fs_drop_inmem_pages(inode);
1827        return 0;
1828}
1829
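/*
 * Apply the masked i_flags for FS_IOC_SETFLAGS / FS_IOC_FSSETXATTR,
 * validating the transitions that need extra care: casefold can only be
 * toggled on an empty directory, and compression can only be enabled on
 * an empty file that is allowed to be compressed.
 */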
1830static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1831{
1832        struct f2fs_inode_info *fi = F2FS_I(inode);
1833        u32 masked_flags = fi->i_flags & mask;
1834
1835        /* mask can be shrunk by flags_valid selector */
1836        iflags &= mask;
1837
1838        /* Is it a quota file? Do not allow the user to mess with it */
1839        if (IS_NOQUOTA(inode))
1840                return -EPERM;
1841
1842        if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1843                if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1844                        return -EOPNOTSUPP;
1845                if (!f2fs_empty_dir(inode))
1846                        return -ENOTEMPTY;
1847        }
1848
1849        if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1850                if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1851                        return -EOPNOTSUPP;
1852                if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1853                        return -EINVAL;
1854        }
1855
1856        if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1857                if (masked_flags & F2FS_COMPR_FL) {
1858                        if (!f2fs_disable_compressed_file(inode))
1859                                return -EINVAL;
1860                }
1861                if (iflags & F2FS_NOCOMP_FL)
1862                        return -EINVAL;
1863                if (iflags & F2FS_COMPR_FL) {
1864                        if (!f2fs_may_compress(inode))
1865                                return -EINVAL;
1866                        if (S_ISREG(inode->i_mode) && inode->i_size)
1867                                return -EINVAL;
1868
1869                        set_compress_context(inode);
1870                }
1871        }
1872        if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1873                if (masked_flags & F2FS_COMPR_FL)
1874                        return -EINVAL;
1875        }
1876
1877        fi->i_flags = iflags | (fi->i_flags & ~mask);
1878        f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1879                                        (fi->i_flags & F2FS_NOCOMP_FL));
1880
1881        if (fi->i_flags & F2FS_PROJINHERIT_FL)
1882                set_inode_flag(inode, FI_PROJ_INHERIT);
1883        else
1884                clear_inode_flag(inode, FI_PROJ_INHERIT);
1885
1886        inode->i_ctime = current_time(inode);
1887        f2fs_set_inode_flags(inode);
1888        f2fs_mark_inode_dirty_sync(inode, true);
1889        return 0;
1890}
1891
1892/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1893
1894/*
1895 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1896 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1897 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1898 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1899 *
1900 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1901 * FS_IOC_FSSETXATTR is done by the VFS.
1902 */
1903
1904static const struct {
1905        u32 iflag;
1906        u32 fsflag;
1907} f2fs_fsflags_map[] = {
1908        { F2FS_COMPR_FL,        FS_COMPR_FL },
1909        { F2FS_SYNC_FL,         FS_SYNC_FL },
1910        { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
1911        { F2FS_APPEND_FL,       FS_APPEND_FL },
1912        { F2FS_NODUMP_FL,       FS_NODUMP_FL },
1913        { F2FS_NOATIME_FL,      FS_NOATIME_FL },
1914        { F2FS_NOCOMP_FL,       FS_NOCOMP_FL },
1915        { F2FS_INDEX_FL,        FS_INDEX_FL },
1916        { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
1917        { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
1918        { F2FS_CASEFOLD_FL,     FS_CASEFOLD_FL },
1919};
1920
1921#define F2FS_GETTABLE_FS_FL (           \
1922                FS_COMPR_FL |           \
1923                FS_SYNC_FL |            \
1924                FS_IMMUTABLE_FL |       \
1925                FS_APPEND_FL |          \
1926                FS_NODUMP_FL |          \
1927                FS_NOATIME_FL |         \
1928                FS_NOCOMP_FL |          \
1929                FS_INDEX_FL |           \
1930                FS_DIRSYNC_FL |         \
1931                FS_PROJINHERIT_FL |     \
1932                FS_ENCRYPT_FL |         \
1933                FS_INLINE_DATA_FL |     \
1934                FS_NOCOW_FL |           \
1935                FS_VERITY_FL |          \
1936                FS_CASEFOLD_FL)
1937
1938#define F2FS_SETTABLE_FS_FL (           \
1939                FS_COMPR_FL |           \
1940                FS_SYNC_FL |            \
1941                FS_IMMUTABLE_FL |       \
1942                FS_APPEND_FL |          \
1943                FS_NODUMP_FL |          \
1944                FS_NOATIME_FL |         \
1945                FS_NOCOMP_FL |          \
1946                FS_DIRSYNC_FL |         \
1947                FS_PROJINHERIT_FL |     \
1948                FS_CASEFOLD_FL)
1949
1950/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1951static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1952{
1953        u32 fsflags = 0;
1954        int i;
1955
1956        for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1957                if (iflags & f2fs_fsflags_map[i].iflag)
1958                        fsflags |= f2fs_fsflags_map[i].fsflag;
1959
1960        return fsflags;
1961}
1962
1963/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1964static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1965{
1966        u32 iflags = 0;
1967        int i;
1968
1969        for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1970                if (fsflags & f2fs_fsflags_map[i].fsflag)
1971                        iflags |= f2fs_fsflags_map[i].iflag;
1972
1973        return iflags;
1974}
1975
1976static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1977{
1978        struct inode *inode = file_inode(filp);
1979
1980        return put_user(inode->i_generation, (int __user *)arg);
1981}
1982
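/*
 * F2FS_IOC_START_ATOMIC_WRITE: flush any pre-existing dirty pages, then
 * mark the inode FI_ATOMIC_FILE so that subsequent writes are staged as
 * in-memory pages and reach disk only on a later commit ioctl.
 */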
1983static int f2fs_ioc_start_atomic_write(struct file *filp)
1984{
1985        struct inode *inode = file_inode(filp);
1986        struct f2fs_inode_info *fi = F2FS_I(inode);
1987        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1988        int ret;
1989
1990        if (!inode_owner_or_capable(&init_user_ns, inode))
1991                return -EACCES;
1992
1993        if (!S_ISREG(inode->i_mode))
1994                return -EINVAL;
1995
1996        if (filp->f_flags & O_DIRECT)
1997                return -EINVAL;
1998
1999        ret = mnt_want_write_file(filp);
2000        if (ret)
2001                return ret;
2002
2003        inode_lock(inode);
2004
2005        f2fs_disable_compressed_file(inode);
2006
2007        if (f2fs_is_atomic_file(inode)) {
2008                if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2009                        ret = -EINVAL;
2010                goto out;
2011        }
2012
2013        ret = f2fs_convert_inline_inode(inode);
2014        if (ret)
2015                goto out;
2016
2017        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2018
2019        /*
2020         * We should wait for end_io so that F2FS_WB_CP_DATA is counted
2021         * correctly by f2fs_is_atomic_file().
2022         */
2023        if (get_dirty_pages(inode))
2024                f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2025                          inode->i_ino, get_dirty_pages(inode));
2026        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2027        if (ret) {
2028                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2029                goto out;
2030        }
2031
2032        spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2033        if (list_empty(&fi->inmem_ilist))
2034                list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2035        sbi->atomic_files++;
2036        spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2037
2038        /* add the inode to inmem_list first, then mark it as an atomic file */
2039        set_inode_flag(inode, FI_ATOMIC_FILE);
2040        clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2041        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2042
2043        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2044        F2FS_I(inode)->inmem_task = current;
2045        stat_update_max_atomic_write(inode);
2046out:
2047        inode_unlock(inode);
2048        mnt_drop_write_file(filp);
2049        return ret;
2050}
2051
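/*
 * F2FS_IOC_COMMIT_ATOMIC_WRITE: write the staged in-memory pages out
 * and fsync the file, dropping the staged pages once they are durable.
 */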
2052static int f2fs_ioc_commit_atomic_write(struct file *filp)
2053{
2054        struct inode *inode = file_inode(filp);
2055        int ret;
2056
2057        if (!inode_owner_or_capable(&init_user_ns, inode))
2058                return -EACCES;
2059
2060        ret = mnt_want_write_file(filp);
2061        if (ret)
2062                return ret;
2063
2064        f2fs_balance_fs(F2FS_I_SB(inode), true);
2065
2066        inode_lock(inode);
2067
2068        if (f2fs_is_volatile_file(inode)) {
2069                ret = -EINVAL;
2070                goto err_out;
2071        }
2072
2073        if (f2fs_is_atomic_file(inode)) {
2074                ret = f2fs_commit_inmem_pages(inode);
2075                if (ret)
2076                        goto err_out;
2077
2078                ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2079                if (!ret)
2080                        f2fs_drop_inmem_pages(inode);
2081        } else {
2082                ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2083        }
2084err_out:
2085        if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2086                clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2087                ret = -EINVAL;
2088        }
2089        inode_unlock(inode);
2090        mnt_drop_write_file(filp);
2091        return ret;
2092}
2093
2094static int f2fs_ioc_start_volatile_write(struct file *filp)
2095{
2096        struct inode *inode = file_inode(filp);
2097        int ret;
2098
2099        if (!inode_owner_or_capable(&init_user_ns, inode))
2100                return -EACCES;
2101
2102        if (!S_ISREG(inode->i_mode))
2103                return -EINVAL;
2104
2105        ret = mnt_want_write_file(filp);
2106        if (ret)
2107                return ret;
2108
2109        inode_lock(inode);
2110
2111        if (f2fs_is_volatile_file(inode))
2112                goto out;
2113
2114        ret = f2fs_convert_inline_inode(inode);
2115        if (ret)
2116                goto out;
2117
2118        stat_inc_volatile_write(inode);
2119        stat_update_max_volatile_write(inode);
2120
2121        set_inode_flag(inode, FI_VOLATILE_FILE);
2122        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2123out:
2124        inode_unlock(inode);
2125        mnt_drop_write_file(filp);
2126        return ret;
2127}
2128
2129static int f2fs_ioc_release_volatile_write(struct file *filp)
2130{
2131        struct inode *inode = file_inode(filp);
2132        int ret;
2133
2134        if (!inode_owner_or_capable(&init_user_ns, inode))
2135                return -EACCES;
2136
2137        ret = mnt_want_write_file(filp);
2138        if (ret)
2139                return ret;
2140
2141        inode_lock(inode);
2142
2143        if (!f2fs_is_volatile_file(inode))
2144                goto out;
2145
2146        if (!f2fs_is_first_block_written(inode)) {
2147                ret = truncate_partial_data_page(inode, 0, true);
2148                goto out;
2149        }
2150
2151        ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2152out:
2153        inode_unlock(inode);
2154        mnt_drop_write_file(filp);
2155        return ret;
2156}
2157
2158static int f2fs_ioc_abort_volatile_write(struct file *filp)
2159{
2160        struct inode *inode = file_inode(filp);
2161        int ret;
2162
2163        if (!inode_owner_or_capable(&init_user_ns, inode))
2164                return -EACCES;
2165
2166        ret = mnt_want_write_file(filp);
2167        if (ret)
2168                return ret;
2169
2170        inode_lock(inode);
2171
2172        if (f2fs_is_atomic_file(inode))
2173                f2fs_drop_inmem_pages(inode);
2174        if (f2fs_is_volatile_file(inode)) {
2175                clear_inode_flag(inode, FI_VOLATILE_FILE);
2176                stat_dec_volatile_write(inode);
2177                ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2178        }
2179
2180        clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2181
2182        inode_unlock(inode);
2183
2184        mnt_drop_write_file(filp);
2185        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2186        return ret;
2187}
2188
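/*
 * F2FS_IOC_SHUTDOWN: stop the filesystem with a caller-chosen degree of
 * grace, from a full bdev freeze and sync down to an immediate stop
 * with no sync at all. Most variants disable checkpointing and mark the
 * filesystem as shut down; NEED_FSCK instead flags it for fsck.
 */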
2189static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2190{
2191        struct inode *inode = file_inode(filp);
2192        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2193        struct super_block *sb = sbi->sb;
2194        __u32 in;
2195        int ret = 0;
2196
2197        if (!capable(CAP_SYS_ADMIN))
2198                return -EPERM;
2199
2200        if (get_user(in, (__u32 __user *)arg))
2201                return -EFAULT;
2202
2203        if (in != F2FS_GOING_DOWN_FULLSYNC) {
2204                ret = mnt_want_write_file(filp);
2205                if (ret) {
2206                        if (ret == -EROFS) {
2207                                ret = 0;
2208                                f2fs_stop_checkpoint(sbi, false);
2209                                set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2210                                trace_f2fs_shutdown(sbi, in, ret);
2211                        }
2212                        return ret;
2213                }
2214        }
2215
2216        switch (in) {
2217        case F2FS_GOING_DOWN_FULLSYNC:
2218                ret = freeze_bdev(sb->s_bdev);
2219                if (ret)
2220                        goto out;
2221                f2fs_stop_checkpoint(sbi, false);
2222                set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2223                thaw_bdev(sb->s_bdev);
2224                break;
2225        case F2FS_GOING_DOWN_METASYNC:
2226                /* do checkpoint only */
2227                ret = f2fs_sync_fs(sb, 1);
2228                if (ret)
2229                        goto out;
2230                f2fs_stop_checkpoint(sbi, false);
2231                set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2232                break;
2233        case F2FS_GOING_DOWN_NOSYNC:
2234                f2fs_stop_checkpoint(sbi, false);
2235                set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2236                break;
2237        case F2FS_GOING_DOWN_METAFLUSH:
2238                f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2239                f2fs_stop_checkpoint(sbi, false);
2240                set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2241                break;
2242        case F2FS_GOING_DOWN_NEED_FSCK:
2243                set_sbi_flag(sbi, SBI_NEED_FSCK);
2244                set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2245                set_sbi_flag(sbi, SBI_IS_DIRTY);
2246                /* do checkpoint only */
2247                ret = f2fs_sync_fs(sb, 1);
2248                goto out;
2249        default:
2250                ret = -EINVAL;
2251                goto out;
2252        }
2253
2254        f2fs_stop_gc_thread(sbi);
2255        f2fs_stop_discard_thread(sbi);
2256
2257        f2fs_drop_discard_cmd(sbi);
2258        clear_opt(sbi, DISCARD);
2259
2260        f2fs_update_time(sbi, REQ_TIME);
2261out:
2262        if (in != F2FS_GOING_DOWN_FULLSYNC)
2263                mnt_drop_write_file(filp);
2264
2265        trace_f2fs_shutdown(sbi, in, ret);
2266
2267        return ret;
2268}
2269
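/*
 * FITRIM: discard free space in the requested range, clamping the
 * minimum extent length to the device's discard granularity.
 */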
2270static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2271{
2272        struct inode *inode = file_inode(filp);
2273        struct super_block *sb = inode->i_sb;
2274        struct request_queue *q = bdev_get_queue(sb->s_bdev);
2275        struct fstrim_range range;
2276        int ret;
2277
2278        if (!capable(CAP_SYS_ADMIN))
2279                return -EPERM;
2280
2281        if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2282                return -EOPNOTSUPP;
2283
2284        if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2285                                sizeof(range)))
2286                return -EFAULT;
2287
2288        ret = mnt_want_write_file(filp);
2289        if (ret)
2290                return ret;
2291
2292        range.minlen = max((unsigned int)range.minlen,
2293                                q->limits.discard_granularity);
2294        ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2295        mnt_drop_write_file(filp);
2296        if (ret < 0)
2297                return ret;
2298
2299        if (copy_to_user((struct fstrim_range __user *)arg, &range,
2300                                sizeof(range)))
2301                return -EFAULT;
2302        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2303        return 0;
2304}
2305
2306static bool uuid_is_nonzero(__u8 u[16])
2307{
2308        int i;
2309
2310        for (i = 0; i < 16; i++)
2311                if (u[i])
2312                        return true;
2313        return false;
2314}
2315
2316static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2317{
2318        struct inode *inode = file_inode(filp);
2319
2320        if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2321                return -EOPNOTSUPP;
2322
2323        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2324
2325        return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2326}
2327
2328static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2329{
2330        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2331                return -EOPNOTSUPP;
2332        return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2333}
2334
2335static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2336{
2337        struct inode *inode = file_inode(filp);
2338        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2339        int err;
2340
2341        if (!f2fs_sb_has_encrypt(sbi))
2342                return -EOPNOTSUPP;
2343
2344        err = mnt_want_write_file(filp);
2345        if (err)
2346                return err;
2347
2348        down_write(&sbi->sb_lock);
2349
2350        if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2351                goto got_it;
2352
2353        /* update superblock with uuid */
2354        generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2355
2356        err = f2fs_commit_super(sbi, false);
2357        if (err) {
2358                /* undo new data */
2359                memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2360                goto out_err;
2361        }
2362got_it:
2363        if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2364                                                                        16))
2365                err = -EFAULT;
2366out_err:
2367        up_write(&sbi->sb_lock);
2368        mnt_drop_write_file(filp);
2369        return err;
2370}
2371
2372static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2373                                             unsigned long arg)
2374{
2375        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2376                return -EOPNOTSUPP;
2377
2378        return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2379}
2380
2381static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2382{
2383        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2384                return -EOPNOTSUPP;
2385
2386        return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2387}
2388
2389static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2390{
2391        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2392                return -EOPNOTSUPP;
2393
2394        return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2395}
2396
2397static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2398                                                    unsigned long arg)
2399{
2400        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2401                return -EOPNOTSUPP;
2402
2403        return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2404}
2405
2406static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2407                                              unsigned long arg)
2408{
2409        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2410                return -EOPNOTSUPP;
2411
2412        return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2413}
2414
2415static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2416{
2417        if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2418                return -EOPNOTSUPP;
2419
2420        return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2421}
2422
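/*
 * F2FS_IOC_GARBAGE_COLLECT: run one round of foreground GC. In
 * non-sync mode, back off with -EBUSY instead of blocking when another
 * GC already holds gc_lock.
 */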
2423static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2424{
2425        struct inode *inode = file_inode(filp);
2426        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2427        __u32 sync;
2428        int ret;
2429
2430        if (!capable(CAP_SYS_ADMIN))
2431                return -EPERM;
2432
2433        if (get_user(sync, (__u32 __user *)arg))
2434                return -EFAULT;
2435
2436        if (f2fs_readonly(sbi->sb))
2437                return -EROFS;
2438
2439        ret = mnt_want_write_file(filp);
2440        if (ret)
2441                return ret;
2442
2443        if (!sync) {
2444                if (!down_write_trylock(&sbi->gc_lock)) {
2445                        ret = -EBUSY;
2446                        goto out;
2447                }
2448        } else {
2449                down_write(&sbi->gc_lock);
2450        }
2451
2452        ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2453out:
2454        mnt_drop_write_file(filp);
2455        return ret;
2456}
2457
2458static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2459{
2460        struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2461        u64 end;
2462        int ret;
2463
2464        if (!capable(CAP_SYS_ADMIN))
2465                return -EPERM;
2466        if (f2fs_readonly(sbi->sb))
2467                return -EROFS;
2468
2469        end = range->start + range->len;
2470        if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2471                                        end >= MAX_BLKADDR(sbi))
2472                return -EINVAL;
2473
2474        ret = mnt_want_write_file(filp);
2475        if (ret)
2476                return ret;
2477
2478do_more:
2479        if (!range->sync) {
2480                if (!down_write_trylock(&sbi->gc_lock)) {
2481                        ret = -EBUSY;
2482                        goto out;
2483                }
2484        } else {
2485                down_write(&sbi->gc_lock);
2486        }
2487
2488        ret = f2fs_gc(sbi, range->sync, true, false,
2489                                GET_SEGNO(sbi, range->start));
2490        if (ret) {
2491                if (ret == -EBUSY)
2492                        ret = -EAGAIN;
2493                goto out;
2494        }
2495        range->start += BLKS_PER_SEC(sbi);
2496        if (range->start <= end)
2497                goto do_more;
2498out:
2499        mnt_drop_write_file(filp);
2500        return ret;
2501}
2502
2503static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2504{
2505        struct f2fs_gc_range range;
2506
2507        if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2508                                                        sizeof(range)))
2509                return -EFAULT;
2510        return __f2fs_ioc_gc_range(filp, &range);
2511}
2512
2513static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2514{
2515        struct inode *inode = file_inode(filp);
2516        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2517        int ret;
2518
2519        if (!capable(CAP_SYS_ADMIN))
2520                return -EPERM;
2521
2522        if (f2fs_readonly(sbi->sb))
2523                return -EROFS;
2524
2525        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2526                f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2527                return -EINVAL;
2528        }
2529
2530        ret = mnt_want_write_file(filp);
2531        if (ret)
2532                return ret;
2533
2534        ret = f2fs_sync_fs(sbi->sb, 1);
2535
2536        mnt_drop_write_file(filp);
2537        return ret;
2538}
2539
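/*
 * Defragment the requested range in two passes: first walk the mapping
 * to check whether the range is fragmented at all, then re-dirty its
 * pages one segment's worth at a time and write them back so the
 * out-of-place allocator lays the blocks down contiguously.
 */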
2540static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2541                                        struct file *filp,
2542                                        struct f2fs_defragment *range)
2543{
2544        struct inode *inode = file_inode(filp);
2545        struct f2fs_map_blocks map = { .m_next_extent = NULL,
2546                                        .m_seg_type = NO_CHECK_TYPE,
2547                                        .m_may_create = false };
2548        struct extent_info ei = {0, 0, 0};
2549        pgoff_t pg_start, pg_end, next_pgofs;
2550        unsigned int blk_per_seg = sbi->blocks_per_seg;
2551        unsigned int total = 0, sec_num;
2552        block_t blk_end = 0;
2553        bool fragmented = false;
2554        int err;
2555
2556        /* if in-place-update policy is enabled, don't waste time here */
2557        if (f2fs_should_update_inplace(inode, NULL))
2558                return -EINVAL;
2559
2560        pg_start = range->start >> PAGE_SHIFT;
2561        pg_end = (range->start + range->len) >> PAGE_SHIFT;
2562
2563        f2fs_balance_fs(sbi, true);
2564
2565        inode_lock(inode);
2566
2567        /* writeback all dirty pages in the range */
2568        err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2569                                                range->start + range->len - 1);
2570        if (err)
2571                goto out;
2572
2573        /*
2574         * look up mapping info in the extent cache; skip defragmenting if
2575         * the physical block addresses are already contiguous.
2576         */
2577        if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2578                if (ei.fofs + ei.len >= pg_end)
2579                        goto out;
2580        }
2581
2582        map.m_lblk = pg_start;
2583        map.m_next_pgofs = &next_pgofs;
2584
2585        /*
2586         * look up mapping info in the dnode page cache; skip defragmenting
2587         * if all physical block addresses are contiguous, even if there are
2588         * hole(s) in the logical blocks.
2589         */
2590        while (map.m_lblk < pg_end) {
2591                map.m_len = pg_end - map.m_lblk;
2592                err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2593                if (err)
2594                        goto out;
2595
2596                if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2597                        map.m_lblk = next_pgofs;
2598                        continue;
2599                }
2600
2601                if (blk_end && blk_end != map.m_pblk)
2602                        fragmented = true;
2603
2604                /* record the total count of blocks that we're going to move */
2605                total += map.m_len;
2606
2607                blk_end = map.m_pblk + map.m_len;
2608
2609                map.m_lblk += map.m_len;
2610        }
2611
2612        if (!fragmented) {
2613                total = 0;
2614                goto out;
2615        }
2616
2617        sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2618
2619        /*
2620         * make sure there are enough free sections for LFS allocation; this
2621         * avoids running the defragmenter in SSR mode when free sections
2622         * are being allocated intensively
2623         */
2624        if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2625                err = -EAGAIN;
2626                goto out;
2627        }
2628
2629        map.m_lblk = pg_start;
2630        map.m_len = pg_end - pg_start;
2631        total = 0;
2632
2633        while (map.m_lblk < pg_end) {
2634                pgoff_t idx;
2635                int cnt = 0;
2636
2637do_map:
2638                map.m_len = pg_end - map.m_lblk;
2639                err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2640                if (err)
2641                        goto clear_out;
2642
2643                if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2644                        map.m_lblk = next_pgofs;
2645                        goto check;
2646                }
2647
2648                set_inode_flag(inode, FI_DO_DEFRAG);
2649
2650                idx = map.m_lblk;
2651                while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2652                        struct page *page;
2653
2654                        page = f2fs_get_lock_data_page(inode, idx, true);
2655                        if (IS_ERR(page)) {
2656                                err = PTR_ERR(page);
2657                                goto clear_out;
2658                        }
2659
2660                        set_page_dirty(page);
2661                        f2fs_put_page(page, 1);
2662
2663                        idx++;
2664                        cnt++;
2665                        total++;
2666                }
2667
2668                map.m_lblk = idx;
2669check:
2670                if (map.m_lblk < pg_end && cnt < blk_per_seg)
2671                        goto do_map;
2672
2673                clear_inode_flag(inode, FI_DO_DEFRAG);
2674
2675                err = filemap_fdatawrite(inode->i_mapping);
2676                if (err)
2677                        goto out;
2678        }
2679clear_out:
2680        clear_inode_flag(inode, FI_DO_DEFRAG);
2681out:
2682        inode_unlock(inode);
2683        if (!err)
2684                range->len = (u64)total << PAGE_SHIFT;
2685        return err;
2686}
2687
2688static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2689{
2690        struct inode *inode = file_inode(filp);
2691        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2692        struct f2fs_defragment range;
2693        int err;
2694
2695        if (!capable(CAP_SYS_ADMIN))
2696                return -EPERM;
2697
2698        if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2699                return -EINVAL;
2700
2701        if (f2fs_readonly(sbi->sb))
2702                return -EROFS;
2703
2704        if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2705                                                        sizeof(range)))
2706                return -EFAULT;
2707
2708        /* verify alignment of offset & size */
2709        if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2710                return -EINVAL;
2711
2712        if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2713                                        max_file_blocks(inode)))
2714                return -EINVAL;
2715
2716        err = mnt_want_write_file(filp);
2717        if (err)
2718                return err;
2719
2720        err = f2fs_defragment_range(sbi, filp, &range);
2721        mnt_drop_write_file(filp);
2722
2723        f2fs_update_time(sbi, REQ_TIME);
2724        if (err < 0)
2725                return err;
2726
2727        if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2728                                                        sizeof(range)))
2729                return -EFAULT;
2730
2731        return 0;
2732}
2733
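/*
 * Back end of F2FS_IOC_MOVE_RANGE: move a block-aligned range between
 * two regular, unencrypted files on the same f2fs instance by
 * exchanging block pointers directly instead of copying data.
 */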
2734static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2735                        struct file *file_out, loff_t pos_out, size_t len)
2736{
2737        struct inode *src = file_inode(file_in);
2738        struct inode *dst = file_inode(file_out);
2739        struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2740        size_t olen = len, dst_max_i_size = 0;
2741        size_t dst_osize;
2742        int ret;
2743
2744        if (file_in->f_path.mnt != file_out->f_path.mnt ||
2745                                src->i_sb != dst->i_sb)
2746                return -EXDEV;
2747
2748        if (unlikely(f2fs_readonly(src->i_sb)))
2749                return -EROFS;
2750
2751        if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2752                return -EINVAL;
2753
2754        if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2755                return -EOPNOTSUPP;
2756
2757        if (pos_out < 0 || pos_in < 0)
2758                return -EINVAL;
2759
2760        if (src == dst) {
2761                if (pos_in == pos_out)
2762                        return 0;
2763                if (pos_out > pos_in && pos_out < pos_in + len)
2764                        return -EINVAL;
2765        }
2766
2767        inode_lock(src);
2768        if (src != dst) {
2769                ret = -EBUSY;
2770                if (!inode_trylock(dst))
2771                        goto out;
2772        }
2773
2774        ret = -EINVAL;
2775        if (pos_in + len > src->i_size || pos_in + len < pos_in)
2776                goto out_unlock;
2777        if (len == 0)
2778                olen = len = src->i_size - pos_in;
2779        if (pos_in + len == src->i_size)
2780                len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2781        if (len == 0) {
2782                ret = 0;
2783                goto out_unlock;
2784        }
2785
2786        dst_osize = dst->i_size;
2787        if (pos_out + olen > dst->i_size)
2788                dst_max_i_size = pos_out + olen;
2789
2790        /* verify the end result is block aligned */
2791        if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2792                        !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2793                        !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2794                goto out_unlock;
2795
2796        ret = f2fs_convert_inline_inode(src);
2797        if (ret)
2798                goto out_unlock;
2799
2800        ret = f2fs_convert_inline_inode(dst);
2801        if (ret)
2802                goto out_unlock;
2803
2804        /* write out all dirty pages from offset */
2805        ret = filemap_write_and_wait_range(src->i_mapping,
2806                                        pos_in, pos_in + len);
2807        if (ret)
2808                goto out_unlock;
2809
2810        ret = filemap_write_and_wait_range(dst->i_mapping,
2811                                        pos_out, pos_out + len);
2812        if (ret)
2813                goto out_unlock;
2814
2815        f2fs_balance_fs(sbi, true);
2816
2817        down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2818        if (src != dst) {
2819                ret = -EBUSY;
2820                if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2821                        goto out_src;
2822        }
2823
2824        f2fs_lock_op(sbi);
2825        ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2826                                pos_out >> F2FS_BLKSIZE_BITS,
2827                                len >> F2FS_BLKSIZE_BITS, false);
2828
2829        if (!ret) {
2830                if (dst_max_i_size)
2831                        f2fs_i_size_write(dst, dst_max_i_size);
2832                else if (dst_osize != dst->i_size)
2833                        f2fs_i_size_write(dst, dst_osize);
2834        }
2835        f2fs_unlock_op(sbi);
2836
2837        if (src != dst)
2838                up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2839out_src:
2840        up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2841out_unlock:
2842        if (src != dst)
2843                inode_unlock(dst);
2844out:
2845        inode_unlock(src);
2846        return ret;
2847}
2848
2849static int __f2fs_ioc_move_range(struct file *filp,
2850                                struct f2fs_move_range *range)
2851{
2852        struct fd dst;
2853        int err;
2854
2855        if (!(filp->f_mode & FMODE_READ) ||
2856                        !(filp->f_mode & FMODE_WRITE))
2857                return -EBADF;
2858
2859        dst = fdget(range->dst_fd);
2860        if (!dst.file)
2861                return -EBADF;
2862
2863        if (!(dst.file->f_mode & FMODE_WRITE)) {
2864                err = -EBADF;
2865                goto err_out;
2866        }
2867
2868        err = mnt_want_write_file(filp);
2869        if (err)
2870                goto err_out;
2871
2872        err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2873                                        range->pos_out, range->len);
2874
2875        mnt_drop_write_file(filp);
2876err_out:
2877        fdput(dst);
2878        return err;
2879}
2880
2881static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2882{
2883        struct f2fs_move_range range;
2884
2885        if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2886                                                        sizeof(range)))
2887                return -EFAULT;
2888        return __f2fs_ioc_move_range(filp, &range);
2889}
2890
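/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by running foreground GC segment by segment, over up to
 * range.segments segments of that device.
 */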
2891static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2892{
2893        struct inode *inode = file_inode(filp);
2894        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2895        struct sit_info *sm = SIT_I(sbi);
2896        unsigned int start_segno = 0, end_segno = 0;
2897        unsigned int dev_start_segno = 0, dev_end_segno = 0;
2898        struct f2fs_flush_device range;
2899        int ret;
2900
2901        if (!capable(CAP_SYS_ADMIN))
2902                return -EPERM;
2903
2904        if (f2fs_readonly(sbi->sb))
2905                return -EROFS;
2906
2907        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2908                return -EINVAL;
2909
2910        if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2911                                                        sizeof(range)))
2912                return -EFAULT;
2913
2914        if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2915                        __is_large_section(sbi)) {
2916                f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2917                          range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2918                return -EINVAL;
2919        }
2920
2921        ret = mnt_want_write_file(filp);
2922        if (ret)
2923                return ret;
2924
2925        if (range.dev_num != 0)
2926                dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2927        dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2928
2929        start_segno = sm->last_victim[FLUSH_DEVICE];
2930        if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2931                start_segno = dev_start_segno;
2932        end_segno = min(start_segno + range.segments, dev_end_segno);
2933
2934        while (start_segno < end_segno) {
2935                if (!down_write_trylock(&sbi->gc_lock)) {
2936                        ret = -EBUSY;
2937                        goto out;
2938                }
2939                sm->last_victim[GC_CB] = end_segno + 1;
2940                sm->last_victim[GC_GREEDY] = end_segno + 1;
2941                sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2942                ret = f2fs_gc(sbi, true, true, true, start_segno);
2943                if (ret == -EAGAIN)
2944                        ret = 0;
2945                else if (ret < 0)
2946                        break;
2947                start_segno++;
2948        }
2949out:
2950        mnt_drop_write_file(filp);
2951        return ret;
2952}
2953
2954static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2955{
2956        struct inode *inode = file_inode(filp);
2957        u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2958
2959        /* always advertise atomic-write support; SQLite on Android relies on it */
2960        sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2961
2962        return put_user(sb_feature, (u32 __user *)arg);
2963}
2964
2965#ifdef CONFIG_QUOTA
2966int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2967{
2968        struct dquot *transfer_to[MAXQUOTAS] = {};
2969        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2970        struct super_block *sb = sbi->sb;
2971        int err = 0;
2972
2973        transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2974        if (!IS_ERR(transfer_to[PRJQUOTA])) {
2975                err = __dquot_transfer(inode, transfer_to);
2976                if (err)
2977                        set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2978                dqput(transfer_to[PRJQUOTA]);
2979        }
2980        return err;
2981}
2982
2983static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2984{
2985        struct f2fs_inode_info *fi = F2FS_I(inode);
2986        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2987        struct page *ipage;
2988        kprojid_t kprojid;
2989        int err;
2990
2991        if (!f2fs_sb_has_project_quota(sbi)) {
2992                if (projid != F2FS_DEF_PROJID)
2993                        return -EOPNOTSUPP;
2994                else
2995                        return 0;
2996        }
2997
2998        if (!f2fs_has_extra_attr(inode))
2999                return -EOPNOTSUPP;
3000
3001        kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3002
3003        if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3004                return 0;
3005
3006        err = -EPERM;
3007        /* Is it quota file? Do not allow user to mess with it */
3008        /* Is it a quota file? Do not allow the user to mess with it */
3009                return err;
3010
3011        ipage = f2fs_get_node_page(sbi, inode->i_ino);
3012        if (IS_ERR(ipage))
3013                return PTR_ERR(ipage);
3014
3015        if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3016                                                                i_projid)) {
3017                err = -EOVERFLOW;
3018                f2fs_put_page(ipage, 1);
3019                return err;
3020        }
3021        f2fs_put_page(ipage, 1);
3022
3023        err = dquot_initialize(inode);
3024        if (err)
3025                return err;
3026
3027        f2fs_lock_op(sbi);
3028        err = f2fs_transfer_project_quota(inode, kprojid);
3029        if (err)
3030                goto out_unlock;
3031
3032        F2FS_I(inode)->i_projid = kprojid;
3033        inode->i_ctime = current_time(inode);
3034        f2fs_mark_inode_dirty_sync(inode, true);
3035out_unlock:
3036        f2fs_unlock_op(sbi);
3037        return err;
3038}
3039#else
3040int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3041{
3042        return 0;
3043}
3044
3045static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3046{
3047        if (projid != F2FS_DEF_PROJID)
3048                return -EOPNOTSUPP;
3049        return 0;
3050}
3051#endif
3052
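/*
 * ->fileattr_get: report the gettable FS_*_FL flags, synthesizing the
 * encryption, verity, inline-data and nocow bits from inode state.
 */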
3053int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3054{
3055        struct inode *inode = d_inode(dentry);
3056        struct f2fs_inode_info *fi = F2FS_I(inode);
3057        u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3058
3059        if (IS_ENCRYPTED(inode))
3060                fsflags |= FS_ENCRYPT_FL;
3061        if (IS_VERITY(inode))
3062                fsflags |= FS_VERITY_FL;
3063        if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3064                fsflags |= FS_INLINE_DATA_FL;
3065        if (is_inode_flag_set(inode, FI_PIN_FILE))
3066                fsflags |= FS_NOCOW_FL;
3067
3068        fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3069
3070        if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3071                fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3072
3073        return 0;
3074}
3075
3076int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3077                      struct dentry *dentry, struct fileattr *fa)
3078{
3079        struct inode *inode = d_inode(dentry);
3080        u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3081        u32 iflags;
3082        int err;
3083
3084        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3085                return -EIO;
3086        if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3087                return -ENOSPC;
3088        if (fsflags & ~F2FS_GETTABLE_FS_FL)
3089                return -EOPNOTSUPP;
3090        fsflags &= F2FS_SETTABLE_FS_FL;
3091        if (!fa->flags_valid)
3092                mask &= FS_COMMON_FL;
3093
3094        iflags = f2fs_fsflags_to_iflags(fsflags);
3095        if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3096                return -EOPNOTSUPP;
3097
3098        err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3099        if (!err)
3100                err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3101
3102        return err;
3103}
3104
3105int f2fs_pin_file_control(struct inode *inode, bool inc)
3106{
3107        struct f2fs_inode_info *fi = F2FS_I(inode);
3108        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3109
3110        /* Use i_gc_failures on regular files as a risk signal for dropping the pin. */
3111        if (inc)
3112                f2fs_i_gc_failures_write(inode,
3113                                fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3114
3115        if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3116                f2fs_warn(sbi, "%s: re-enable GC for ino %lx after %u failed GC trials",
3117                          __func__, inode->i_ino,
3118                          fi->i_gc_failures[GC_FAILURE_PIN]);
3119                clear_inode_flag(inode, FI_PIN_FILE);
3120                return -EAGAIN;
3121        }
3122        return 0;
3123}
3124
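/*
 * A pinned file is updated in place and skipped by GC, so its block
 * addresses stay stable; f2fs_pin_file_control() above drops the pin
 * once GC has failed on the inode more than gc_pin_file_threshold times.
 *
 * Illustrative userspace sketch (an assumption, not part of this file):
 *
 *	__u32 pin = 1;
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		;	/* fails with EAGAIN once the failure threshold hit */
 */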
3125static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3126{
3127        struct inode *inode = file_inode(filp);
3128        __u32 pin;
3129        int ret = 0;
3130
3131        if (get_user(pin, (__u32 __user *)arg))
3132                return -EFAULT;
3133
3134        if (!S_ISREG(inode->i_mode))
3135                return -EINVAL;
3136
3137        if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3138                return -EROFS;
3139
3140        ret = mnt_want_write_file(filp);
3141        if (ret)
3142                return ret;
3143
3144        inode_lock(inode);
3145
3146        if (f2fs_should_update_outplace(inode, NULL)) {
3147                ret = -EINVAL;
3148                goto out;
3149        }
3150
3151        if (!pin) {
3152                clear_inode_flag(inode, FI_PIN_FILE);
3153                f2fs_i_gc_failures_write(inode, 0);
3154                goto done;
3155        }
3156
3157        if (f2fs_pin_file_control(inode, false)) {
3158                ret = -EAGAIN;
3159                goto out;
3160        }
3161
3162        ret = f2fs_convert_inline_inode(inode);
3163        if (ret)
3164                goto out;
3165
3166        if (!f2fs_disable_compressed_file(inode)) {
3167                ret = -EOPNOTSUPP;
3168                goto out;
3169        }
3170
3171        set_inode_flag(inode, FI_PIN_FILE);
3172        ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3173done:
3174        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3175out:
3176        inode_unlock(inode);
3177        mnt_drop_write_file(filp);
3178        return ret;
3179}
3180
3181static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3182{
3183        struct inode *inode = file_inode(filp);
3184        __u32 pin = 0;
3185
3186        if (is_inode_flag_set(inode, FI_PIN_FILE))
3187                pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3188        return put_user(pin, (u32 __user *)arg);
3189}
3190
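/*
 * Populate the extent cache up front by mapping the whole file with
 * F2FS_GET_BLOCK_PRECACHE lookups; m_may_create is false, so no blocks
 * are allocated. Exposed to userspace as F2FS_IOC_PRECACHE_EXTENTS,
 * which takes no argument.
 */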
3191int f2fs_precache_extents(struct inode *inode)
3192{
3193        struct f2fs_inode_info *fi = F2FS_I(inode);
3194        struct f2fs_map_blocks map;
3195        pgoff_t m_next_extent;
3196        loff_t end;
3197        int err;
3198
3199        if (is_inode_flag_set(inode, FI_NO_EXTENT))
3200                return -EOPNOTSUPP;
3201
3202        map.m_lblk = 0;
3203        map.m_next_pgofs = NULL;
3204        map.m_next_extent = &m_next_extent;
3205        map.m_seg_type = NO_CHECK_TYPE;
3206        map.m_may_create = false;
3207        end = max_file_blocks(inode);
3208
3209        while (map.m_lblk < end) {
3210                map.m_len = end - map.m_lblk;
3211
3212                down_write(&fi->i_gc_rwsem[WRITE]);
3213                err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3214                up_write(&fi->i_gc_rwsem[WRITE]);
3215                if (err)
3216                        return err;
3217
3218                map.m_lblk = m_next_extent;
3219        }
3220
3221        return 0;
3222}
3223
3224static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3225{
3226        return f2fs_precache_extents(file_inode(filp));
3227}
3228
3229static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3230{
3231        struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3232        __u64 block_count;
3233
3234        if (!capable(CAP_SYS_ADMIN))
3235                return -EPERM;
3236
3237        if (f2fs_readonly(sbi->sb))
3238                return -EROFS;
3239
3240        if (copy_from_user(&block_count, (void __user *)arg,
3241                           sizeof(block_count)))
3242                return -EFAULT;
3243
3244        return f2fs_resize_fs(sbi, block_count);
3245}
3246
3247static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3248{
3249        struct inode *inode = file_inode(filp);
3250
3251        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3252
3253        if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3254                f2fs_warn(F2FS_I_SB(inode),
3255                          "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3256                          inode->i_ino);
3257                return -EOPNOTSUPP;
3258        }
3259
3260        return fsverity_ioctl_enable(filp, (const void __user *)arg);
3261}
3262
3263static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3264{
3265        if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3266                return -EOPNOTSUPP;
3267
3268        return fsverity_ioctl_measure(filp, (void __user *)arg);
3269}
3270
3271static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3272{
3273        if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3274                return -EOPNOTSUPP;
3275
3276        return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3277}
3278
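/*
 * The volume label is stored in the raw superblock as UTF-16LE
 * (volume_name[]). The FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL handlers
 * below convert to and from UTF-8 under sb_lock; a successful set also
 * commits the superblock to disk.
 */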
3279static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3280{
3281        struct inode *inode = file_inode(filp);
3282        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3283        char *vbuf;
3284        int count;
3285        int err = 0;
3286
3287        vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3288        if (!vbuf)
3289                return -ENOMEM;
3290
3291        down_read(&sbi->sb_lock);
3292        count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3293                        ARRAY_SIZE(sbi->raw_super->volume_name),
3294                        UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3295        up_read(&sbi->sb_lock);
3296
3297        if (copy_to_user((char __user *)arg, vbuf,
3298                                min(FSLABEL_MAX, count)))
3299                err = -EFAULT;
3300
3301        kfree(vbuf);
3302        return err;
3303}
3304
3305static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3306{
3307        struct inode *inode = file_inode(filp);
3308        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3309        char *vbuf;
3310        int err = 0;
3311
3312        if (!capable(CAP_SYS_ADMIN))
3313                return -EPERM;
3314
3315        vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3316        if (IS_ERR(vbuf))
3317                return PTR_ERR(vbuf);
3318
3319        err = mnt_want_write_file(filp);
3320        if (err)
3321                goto out;
3322
3323        down_write(&sbi->sb_lock);
3324
3325        memset(sbi->raw_super->volume_name, 0,
3326                        sizeof(sbi->raw_super->volume_name));
3327        utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3328                        sbi->raw_super->volume_name,
3329                        ARRAY_SIZE(sbi->raw_super->volume_name));
3330
3331        err = f2fs_commit_super(sbi, false);
3332
3333        up_write(&sbi->sb_lock);
3334
3335        mnt_drop_write_file(filp);
3336out:
3337        kfree(vbuf);
3338        return err;
3339}
3340
3341static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3342{
3343        struct inode *inode = file_inode(filp);
3344        __u64 blocks;
3345
3346        if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3347                return -EOPNOTSUPP;
3348
3349        if (!f2fs_compressed_file(inode))
3350                return -EINVAL;
3351
3352        blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3353        return put_user(blocks, (u64 __user *)arg);
3354}
3355
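/*
 * Layout assumed by the two helpers below: slot 0 of a compressed
 * cluster holds the COMPRESS_ADDR marker, and the remaining
 * i_cluster_size - 1 slots hold either valid block addresses (the
 * compressed payload) or NEW_ADDR/NULL_ADDR placeholders. Releasing
 * turns reserved NEW_ADDR slots into NULL_ADDR and frees the space;
 * reserving does the reverse.
 */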
3356static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3357{
3358        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3359        unsigned int released_blocks = 0;
3360        int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3361        block_t blkaddr;
3362        int i;
3363
3364        for (i = 0; i < count; i++) {
3365                blkaddr = data_blkaddr(dn->inode, dn->node_page,
3366                                                dn->ofs_in_node + i);
3367
3368                if (!__is_valid_data_blkaddr(blkaddr))
3369                        continue;
3370                if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3371                                        DATA_GENERIC_ENHANCE)))
3372                        return -EFSCORRUPTED;
3373        }
3374
3375        while (count) {
3376                int compr_blocks = 0;
3377
3378                for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3379                        blkaddr = f2fs_data_blkaddr(dn);
3380
3381                        if (i == 0) {
3382                                if (blkaddr == COMPRESS_ADDR)
3383                                        continue;
3384                                dn->ofs_in_node += cluster_size;
3385                                goto next;
3386                        }
3387
3388                        if (__is_valid_data_blkaddr(blkaddr))
3389                                compr_blocks++;
3390
3391                        if (blkaddr != NEW_ADDR)
3392                                continue;
3393
3394                        dn->data_blkaddr = NULL_ADDR;
3395                        f2fs_set_data_blkaddr(dn);
3396                }
3397
3398                f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3399                dec_valid_block_count(sbi, dn->inode,
3400                                        cluster_size - compr_blocks);
3401
3402                released_blocks += cluster_size - compr_blocks;
3403next:
3404                count -= cluster_size;
3405        }
3406
3407        return released_blocks;
3408}
3409
3410static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3411{
3412        struct inode *inode = file_inode(filp);
3413        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3414        pgoff_t page_idx = 0, last_idx;
3415        unsigned int released_blocks = 0;
3416        int ret;
3417        int writecount;
3418
3419        if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3420                return -EOPNOTSUPP;
3421
3422        if (!f2fs_compressed_file(inode))
3423                return -EINVAL;
3424
3425        if (f2fs_readonly(sbi->sb))
3426                return -EROFS;
3427
3428        ret = mnt_want_write_file(filp);
3429        if (ret)
3430                return ret;
3431
3432        f2fs_balance_fs(F2FS_I_SB(inode), true);
3433
3434        inode_lock(inode);
3435
3436        writecount = atomic_read(&inode->i_writecount);
3437        if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3438                        (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3439                ret = -EBUSY;
3440                goto out;
3441        }
3442
3443        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3444                ret = -EINVAL;
3445                goto out;
3446        }
3447
3448        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3449        if (ret)
3450                goto out;
3451
3452        set_inode_flag(inode, FI_COMPRESS_RELEASED);
3453        inode->i_ctime = current_time(inode);
3454        f2fs_mark_inode_dirty_sync(inode, true);
3455
3456        if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3457                goto out;
3458
3459        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3460        filemap_invalidate_lock(inode->i_mapping);
3461
3462        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3463
3464        while (page_idx < last_idx) {
3465                struct dnode_of_data dn;
3466                pgoff_t end_offset, count;
3467
3468                set_new_dnode(&dn, inode, NULL, NULL, 0);
3469                ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3470                if (ret) {
3471                        if (ret == -ENOENT) {
3472                                page_idx = f2fs_get_next_page_offset(&dn,
3473                                                                page_idx);
3474                                ret = 0;
3475                                continue;
3476                        }
3477                        break;
3478                }
3479
3480                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3481                count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3482                count = round_up(count, F2FS_I(inode)->i_cluster_size);
3483
3484                ret = release_compress_blocks(&dn, count);
3485
3486                f2fs_put_dnode(&dn);
3487
3488                if (ret < 0)
3489                        break;
3490
3491                page_idx += count;
3492                released_blocks += ret;
3493        }
3494
3495        filemap_invalidate_unlock(inode->i_mapping);
3496        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3497out:
3498        inode_unlock(inode);
3499
3500        mnt_drop_write_file(filp);
3501
3502        if (ret >= 0) {
3503                ret = put_user(released_blocks, (u64 __user *)arg);
3504        } else if (released_blocks &&
3505                        atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3506                set_sbi_flag(sbi, SBI_NEED_FSCK);
3507                f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3508                        "iblocks=%llu, released=%u, compr_blocks=%u, "
3509                        "run fsck to fix.",
3510                        __func__, inode->i_ino, inode->i_blocks,
3511                        released_blocks,
3512                        atomic_read(&F2FS_I(inode)->i_compr_blocks));
3513        }
3514
3515        return ret;
3516}
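
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 *
 *	__u64 released;
 *
 *	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released) == 0)
 *		;	/* 'released' holds the number of blocks freed */
 *
 * The caller must hold the only writable open of the file; the inode is
 * then marked FI_COMPRESS_RELEASED, and writes fail until the space is
 * reserved again via F2FS_IOC_RESERVE_COMPRESS_BLOCKS.
 */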
3517
3518static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3519{
3520        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3521        unsigned int reserved_blocks = 0;
3522        int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3523        block_t blkaddr;
3524        int i;
3525
3526        for (i = 0; i < count; i++) {
3527                blkaddr = data_blkaddr(dn->inode, dn->node_page,
3528                                                dn->ofs_in_node + i);
3529
3530                if (!__is_valid_data_blkaddr(blkaddr))
3531                        continue;
3532                if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3533                                        DATA_GENERIC_ENHANCE)))
3534                        return -EFSCORRUPTED;
3535        }
3536
3537        while (count) {
3538                int compr_blocks = 0;
3539                blkcnt_t reserved;
3540                int ret;
3541
3542                for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3543                        blkaddr = f2fs_data_blkaddr(dn);
3544
3545                        if (i == 0) {
3546                                if (blkaddr == COMPRESS_ADDR)
3547                                        continue;
3548                                dn->ofs_in_node += cluster_size;
3549                                goto next;
3550                        }
3551
3552                        if (__is_valid_data_blkaddr(blkaddr)) {
3553                                compr_blocks++;
3554                                continue;
3555                        }
3556
3557                        dn->data_blkaddr = NEW_ADDR;
3558                        f2fs_set_data_blkaddr(dn);
3559                }
3560
3561                reserved = cluster_size - compr_blocks;
3562                ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3563                if (ret)
3564                        return ret;
3565
3566                if (reserved != cluster_size - compr_blocks)
3567                        return -ENOSPC;
3568
3569                f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3570
3571                reserved_blocks += reserved;
3572next:
3573                count -= cluster_size;
3574        }
3575
3576        return reserved_blocks;
3577}
3578
3579static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3580{
3581        struct inode *inode = file_inode(filp);
3582        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3583        pgoff_t page_idx = 0, last_idx;
3584        unsigned int reserved_blocks = 0;
3585        int ret;
3586
3587        if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3588                return -EOPNOTSUPP;
3589
3590        if (!f2fs_compressed_file(inode))
3591                return -EINVAL;
3592
3593        if (f2fs_readonly(sbi->sb))
3594                return -EROFS;
3595
3596        ret = mnt_want_write_file(filp);
3597        if (ret)
3598                return ret;
3599
3600        if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3601                goto out;
3602
3603        f2fs_balance_fs(F2FS_I_SB(inode), true);
3604
3605        inode_lock(inode);
3606
3607        if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3608                ret = -EINVAL;
3609                goto unlock_inode;
3610        }
3611
3612        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3613        filemap_invalidate_lock(inode->i_mapping);
3614
3615        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3616
3617        while (page_idx < last_idx) {
3618                struct dnode_of_data dn;
3619                pgoff_t end_offset, count;
3620
3621                set_new_dnode(&dn, inode, NULL, NULL, 0);
3622                ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3623                if (ret) {
3624                        if (ret == -ENOENT) {
3625                                page_idx = f2fs_get_next_page_offset(&dn,
3626                                                                page_idx);
3627                                ret = 0;
3628                                continue;
3629                        }
3630                        break;
3631                }
3632
3633                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3634                count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3635                count = round_up(count, F2FS_I(inode)->i_cluster_size);
3636
3637                ret = reserve_compress_blocks(&dn, count);
3638
3639                f2fs_put_dnode(&dn);
3640
3641                if (ret < 0)
3642                        break;
3643
3644                page_idx += count;
3645                reserved_blocks += ret;
3646        }
3647
3648        filemap_invalidate_unlock(inode->i_mapping);
3649        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3650
3651        if (ret >= 0) {
3652                clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3653                inode->i_ctime = current_time(inode);
3654                f2fs_mark_inode_dirty_sync(inode, true);
3655        }
3656unlock_inode:
3657        inode_unlock(inode);
3658out:
3659        mnt_drop_write_file(filp);
3660
3661        if (ret >= 0) {
3662                ret = put_user(reserved_blocks, (u64 __user *)arg);
3663        } else if (reserved_blocks &&
3664                        atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3665                set_sbi_flag(sbi, SBI_NEED_FSCK);
3666                f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3667                        "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3668                        "run fsck to fix.",
3669                        __func__, inode->i_ino, inode->i_blocks,
3670                        reserved_blocks,
3671                        atomic_read(&F2FS_I(inode)->i_compr_blocks));
3672        }
3673
3674        return ret;
3675}
3676
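/*
 * Erase one physically contiguous extent on behalf of
 * F2FS_IOC_SEC_TRIM_FILE: F2FS_TRIM_FILE_DISCARD issues a discard
 * (secure, if the queue supports it) and F2FS_TRIM_FILE_ZEROOUT writes
 * zeroes, going through fscrypt_zeroout_range() for encrypted files so
 * the zeroes pass through the encryption path.
 */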
3677static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3678                pgoff_t off, block_t block, block_t len, u32 flags)
3679{
3680        struct request_queue *q = bdev_get_queue(bdev);
3681        sector_t sector = SECTOR_FROM_BLOCK(block);
3682        sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3683        int ret = 0;
3684
3685        if (!q)
3686                return -ENXIO;
3687
3688        if (flags & F2FS_TRIM_FILE_DISCARD)
3689                ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3690                                                blk_queue_secure_erase(q) ?
3691                                                BLKDEV_DISCARD_SECURE : 0);
3692
3693        if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3694                if (IS_ENCRYPTED(inode))
3695                        ret = fscrypt_zeroout_range(inode, off, block, len);
3696                else
3697                        ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3698                                        GFP_NOFS, 0);
3699        }
3700
3701        return ret;
3702}
3703
3704static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3705{
3706        struct inode *inode = file_inode(filp);
3707        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3708        struct address_space *mapping = inode->i_mapping;
3709        struct block_device *prev_bdev = NULL;
3710        struct f2fs_sectrim_range range;
3711        pgoff_t index, pg_end, prev_index = 0;
3712        block_t prev_block = 0, len = 0;
3713        loff_t end_addr;
3714        bool to_end = false;
3715        int ret = 0;
3716
3717        if (!(filp->f_mode & FMODE_WRITE))
3718                return -EBADF;
3719
3720        if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3721                                sizeof(range)))
3722                return -EFAULT;
3723
3724        if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3725                        !S_ISREG(inode->i_mode))
3726                return -EINVAL;
3727
3728        if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3729                        !f2fs_hw_support_discard(sbi)) ||
3730                        ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3731                         IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3732                return -EOPNOTSUPP;
3733
3734        file_start_write(filp);
3735        inode_lock(inode);
3736
3737        if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3738                        range.start >= inode->i_size) {
3739                ret = -EINVAL;
3740                goto err;
3741        }
3742
3743        if (range.len == 0)
3744                goto err;
3745
3746        if (inode->i_size - range.start > range.len) {
3747                end_addr = range.start + range.len;
3748        } else {
3749                end_addr = range.len == (u64)-1 ?
3750                        sbi->sb->s_maxbytes : inode->i_size;
3751                to_end = true;
3752        }
3753
3754        if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3755                        (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3756                ret = -EINVAL;
3757                goto err;
3758        }
3759
3760        index = F2FS_BYTES_TO_BLK(range.start);
3761        pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3762
3763        ret = f2fs_convert_inline_inode(inode);
3764        if (ret)
3765                goto err;
3766
3767        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3768        filemap_invalidate_lock(mapping);
3769
3770        ret = filemap_write_and_wait_range(mapping, range.start,
3771                        to_end ? LLONG_MAX : end_addr - 1);
3772        if (ret)
3773                goto out;
3774
3775        truncate_inode_pages_range(mapping, range.start,
3776                        to_end ? -1 : end_addr - 1);
3777
3778        while (index < pg_end) {
3779                struct dnode_of_data dn;
3780                pgoff_t end_offset, count;
3781                int i;
3782
3783                set_new_dnode(&dn, inode, NULL, NULL, 0);
3784                ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3785                if (ret) {
3786                        if (ret == -ENOENT) {
3787                                index = f2fs_get_next_page_offset(&dn, index);
3788                                continue;
3789                        }
3790                        goto out;
3791                }
3792
3793                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3794                count = min(end_offset - dn.ofs_in_node, pg_end - index);
3795                for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3796                        struct block_device *cur_bdev;
3797                        block_t blkaddr = f2fs_data_blkaddr(&dn);
3798
3799                        if (!__is_valid_data_blkaddr(blkaddr))
3800                                continue;
3801
3802                        if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3803                                                DATA_GENERIC_ENHANCE)) {
3804                                ret = -EFSCORRUPTED;
3805                                f2fs_put_dnode(&dn);
3806                                goto out;
3807                        }
3808
3809                        cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3810                        if (f2fs_is_multi_device(sbi)) {
3811                                int di = f2fs_target_device_index(sbi, blkaddr);
3812
3813                                blkaddr -= FDEV(di).start_blk;
3814                        }
3815
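                        /*
                         * Batch physically contiguous blocks on the same
                         * device into one erase request; when contiguity
                         * breaks, flush the accumulated run through
                         * f2fs_secure_erase() and start a new one.
                         */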
3816                        if (len) {
3817                                if (prev_bdev == cur_bdev &&
3818                                                index == prev_index + len &&
3819                                                blkaddr == prev_block + len) {
3820                                        len++;
3821                                } else {
3822                                        ret = f2fs_secure_erase(prev_bdev,
3823                                                inode, prev_index, prev_block,
3824                                                len, range.flags);
3825                                        if (ret) {
3826                                                f2fs_put_dnode(&dn);
3827                                                goto out;
3828                                        }
3829
3830                                        len = 0;
3831                                }
3832                        }
3833
3834                        if (!len) {
3835                                prev_bdev = cur_bdev;
3836                                prev_index = index;
3837                                prev_block = blkaddr;
3838                                len = 1;
3839                        }
3840                }
3841
3842                f2fs_put_dnode(&dn);
3843
3844                if (fatal_signal_pending(current)) {
3845                        ret = -EINTR;
3846                        goto out;
3847                }
3848                cond_resched();
3849        }
3850
3851        if (len)
3852                ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3853                                prev_block, len, range.flags);
3854out:
3855        filemap_invalidate_unlock(mapping);
3856        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3857err:
3858        inode_unlock(inode);
3859        file_end_write(filp);
3860
3861        return ret;
3862}
3863
3864static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3865{
3866        struct inode *inode = file_inode(filp);
3867        struct f2fs_comp_option option;
3868
3869        if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3870                return -EOPNOTSUPP;
3871
3872        inode_lock_shared(inode);
3873
3874        if (!f2fs_compressed_file(inode)) {
3875                inode_unlock_shared(inode);
3876                return -ENODATA;
3877        }
3878
3879        option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3880        option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3881
3882        inode_unlock_shared(inode);
3883
3884        if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3885                                sizeof(option)))
3886                return -EFAULT;
3887
3888        return 0;
3889}
3890
3891static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3892{
3893        struct inode *inode = file_inode(filp);
3894        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3895        struct f2fs_comp_option option;
3896        int ret = 0;
3897
3898        if (!f2fs_sb_has_compression(sbi))
3899                return -EOPNOTSUPP;
3900
3901        if (!(filp->f_mode & FMODE_WRITE))
3902                return -EBADF;
3903
3904        if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3905                                sizeof(option)))
3906                return -EFAULT;
3907
3908        if (!f2fs_compressed_file(inode) ||
3909                        option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3910                        option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3911                        option.algorithm >= COMPRESS_MAX)
3912                return -EINVAL;
3913
3914        file_start_write(filp);
3915        inode_lock(inode);
3916
3917        if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3918                ret = -EBUSY;
3919                goto out;
3920        }
3921
3922        if (inode->i_size != 0) {
3923                ret = -EFBIG;
3924                goto out;
3925        }
3926
3927        F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3928        F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3929        F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3930        f2fs_mark_inode_dirty_sync(inode, true);
3931
3932        if (!f2fs_is_compress_backend_ready(inode))
3933                f2fs_warn(sbi, "compression algorithm is successfully set, "
3934                        "but the current kernel does not support it.");
3935out:
3936        inode_unlock(inode);
3937        file_end_write(filp);
3938
3939        return ret;
3940}
3941
3942static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3943{
3944        DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3945        struct address_space *mapping = inode->i_mapping;
3946        struct page *page;
3947        pgoff_t redirty_idx = page_idx;
3948        int i, page_len = 0, ret = 0;
3949
3950        page_cache_ra_unbounded(&ractl, len, 0);
3951
3952        for (i = 0; i < len; i++, page_idx++) {
3953                page = read_cache_page(mapping, page_idx, NULL, NULL);
3954                if (IS_ERR(page)) {
3955                        ret = PTR_ERR(page);
3956                        break;
3957                }
3958                page_len++;
3959        }
3960
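        /*
         * Lock each now-cached page and mark it dirty so writeback will
         * rewrite it; the two puts drop the find_lock_page() reference
         * (unlocking the page) and the read_cache_page() reference.
         */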
3961        for (i = 0; i < page_len; i++, redirty_idx++) {
3962                page = find_lock_page(mapping, redirty_idx);
3963                if (!page) {
3964                        ret = -ENOMEM;
3965                        break;
3966                }
3967                set_page_dirty(page);
3968                f2fs_put_page(page, 1);
3969                f2fs_put_page(page, 0);
3970        }
3971
3972        return ret;
3973}
3974
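/*
 * With compress_mode=user, writeback (de)compresses clusters only on
 * request: F2FS_IOC_COMPRESS_FILE sets FI_ENABLE_COMPRESS and redirties
 * every cluster so it is rewritten compressed, while
 * F2FS_IOC_DECOMPRESS_FILE redirties the clusters of a compressed file
 * so they are rewritten as plain blocks. Neither takes an argument;
 * both flush the file before returning.
 */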
3975static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3976{
3977        struct inode *inode = file_inode(filp);
3978        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3979        struct f2fs_inode_info *fi = F2FS_I(inode);
3980        pgoff_t page_idx = 0, last_idx;
3981        unsigned int blk_per_seg = sbi->blocks_per_seg;
3982        int cluster_size = F2FS_I(inode)->i_cluster_size;
3983        int count, ret;
3984
3985        if (!f2fs_sb_has_compression(sbi) ||
3986                        F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3987                return -EOPNOTSUPP;
3988
3989        if (!(filp->f_mode & FMODE_WRITE))
3990                return -EBADF;
3991
3992        if (!f2fs_compressed_file(inode))
3993                return -EINVAL;
3994
3995        f2fs_balance_fs(F2FS_I_SB(inode), true);
3996
3997        file_start_write(filp);
3998        inode_lock(inode);
3999
4000        if (!f2fs_is_compress_backend_ready(inode)) {
4001                ret = -EOPNOTSUPP;
4002                goto out;
4003        }
4004
4005        if (f2fs_is_mmap_file(inode)) {
4006                ret = -EBUSY;
4007                goto out;
4008        }
4009
4010        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4011        if (ret)
4012                goto out;
4013
4014        if (!atomic_read(&fi->i_compr_blocks))
4015                goto out;
4016
4017        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4018
4019        count = last_idx - page_idx;
4020        while (count) {
4021                int len = min(cluster_size, count);
4022
4023                ret = redirty_blocks(inode, page_idx, len);
4024                if (ret < 0)
4025                        break;
4026
4027                if (get_dirty_pages(inode) >= blk_per_seg)
4028                        filemap_fdatawrite(inode->i_mapping);
4029
4030                count -= len;
4031                page_idx += len;
4032        }
4033
4034        if (!ret)
4035                ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4036                                                        LLONG_MAX);
4037
4038        if (ret)
4039                f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4040                          __func__, ret);
4041out:
4042        inode_unlock(inode);
4043        file_end_write(filp);
4044
4045        return ret;
4046}
4047
4048static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4049{
4050        struct inode *inode = file_inode(filp);
4051        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4052        pgoff_t page_idx = 0, last_idx;
4053        unsigned int blk_per_seg = sbi->blocks_per_seg;
4054        int cluster_size = F2FS_I(inode)->i_cluster_size;
4055        int count, ret;
4056
4057        if (!f2fs_sb_has_compression(sbi) ||
4058                        F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4059                return -EOPNOTSUPP;
4060
4061        if (!(filp->f_mode & FMODE_WRITE))
4062                return -EBADF;
4063
4064        if (!f2fs_compressed_file(inode))
4065                return -EINVAL;
4066
4067        f2fs_balance_fs(F2FS_I_SB(inode), true);
4068
4069        file_start_write(filp);
4070        inode_lock(inode);
4071
4072        if (!f2fs_is_compress_backend_ready(inode)) {
4073                ret = -EOPNOTSUPP;
4074                goto out;
4075        }
4076
4077        if (f2fs_is_mmap_file(inode)) {
4078                ret = -EBUSY;
4079                goto out;
4080        }
4081
4082        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4083        if (ret)
4084                goto out;
4085
4086        set_inode_flag(inode, FI_ENABLE_COMPRESS);
4087
4088        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4089
4090        count = last_idx - page_idx;
4091        while (count) {
4092                int len = min(cluster_size, count);
4093
4094                ret = redirty_blocks(inode, page_idx, len);
4095                if (ret < 0)
4096                        break;
4097
4098                if (get_dirty_pages(inode) >= blk_per_seg)
4099                        filemap_fdatawrite(inode->i_mapping);
4100
4101                count -= len;
4102                page_idx += len;
4103        }
4104
4105        if (!ret)
4106                ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4107                                                        LLONG_MAX);
4108
4109        clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4110
4111        if (ret)
4112                f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4113                          __func__, ret);
4114out:
4115        inode_unlock(inode);
4116        file_end_write(filp);
4117
4118        return ret;
4119}
4120
4121static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4122{
4123        switch (cmd) {
4124        case FS_IOC_GETVERSION:
4125                return f2fs_ioc_getversion(filp, arg);
4126        case F2FS_IOC_START_ATOMIC_WRITE:
4127                return f2fs_ioc_start_atomic_write(filp);
4128        case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4129                return f2fs_ioc_commit_atomic_write(filp);
4130        case F2FS_IOC_START_VOLATILE_WRITE:
4131                return f2fs_ioc_start_volatile_write(filp);
4132        case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4133                return f2fs_ioc_release_volatile_write(filp);
4134        case F2FS_IOC_ABORT_VOLATILE_WRITE:
4135                return f2fs_ioc_abort_volatile_write(filp);
4136        case F2FS_IOC_SHUTDOWN:
4137                return f2fs_ioc_shutdown(filp, arg);
4138        case FITRIM:
4139                return f2fs_ioc_fitrim(filp, arg);
4140        case FS_IOC_SET_ENCRYPTION_POLICY:
4141                return f2fs_ioc_set_encryption_policy(filp, arg);
4142        case FS_IOC_GET_ENCRYPTION_POLICY:
4143                return f2fs_ioc_get_encryption_policy(filp, arg);
4144        case FS_IOC_GET_ENCRYPTION_PWSALT:
4145                return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4146        case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4147                return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4148        case FS_IOC_ADD_ENCRYPTION_KEY:
4149                return f2fs_ioc_add_encryption_key(filp, arg);
4150        case FS_IOC_REMOVE_ENCRYPTION_KEY:
4151                return f2fs_ioc_remove_encryption_key(filp, arg);
4152        case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4153                return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4154        case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4155                return f2fs_ioc_get_encryption_key_status(filp, arg);
4156        case FS_IOC_GET_ENCRYPTION_NONCE:
4157                return f2fs_ioc_get_encryption_nonce(filp, arg);
4158        case F2FS_IOC_GARBAGE_COLLECT:
4159                return f2fs_ioc_gc(filp, arg);
4160        case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4161                return f2fs_ioc_gc_range(filp, arg);
4162        case F2FS_IOC_WRITE_CHECKPOINT:
4163                return f2fs_ioc_write_checkpoint(filp, arg);
4164        case F2FS_IOC_DEFRAGMENT:
4165                return f2fs_ioc_defragment(filp, arg);
4166        case F2FS_IOC_MOVE_RANGE:
4167                return f2fs_ioc_move_range(filp, arg);
4168        case F2FS_IOC_FLUSH_DEVICE:
4169                return f2fs_ioc_flush_device(filp, arg);
4170        case F2FS_IOC_GET_FEATURES:
4171                return f2fs_ioc_get_features(filp, arg);
4172        case F2FS_IOC_GET_PIN_FILE:
4173                return f2fs_ioc_get_pin_file(filp, arg);
4174        case F2FS_IOC_SET_PIN_FILE:
4175                return f2fs_ioc_set_pin_file(filp, arg);
4176        case F2FS_IOC_PRECACHE_EXTENTS:
4177                return f2fs_ioc_precache_extents(filp, arg);
4178        case F2FS_IOC_RESIZE_FS:
4179                return f2fs_ioc_resize_fs(filp, arg);
4180        case FS_IOC_ENABLE_VERITY:
4181                return f2fs_ioc_enable_verity(filp, arg);
4182        case FS_IOC_MEASURE_VERITY:
4183                return f2fs_ioc_measure_verity(filp, arg);
4184        case FS_IOC_READ_VERITY_METADATA:
4185                return f2fs_ioc_read_verity_metadata(filp, arg);
4186        case FS_IOC_GETFSLABEL:
4187                return f2fs_ioc_getfslabel(filp, arg);
4188        case FS_IOC_SETFSLABEL:
4189                return f2fs_ioc_setfslabel(filp, arg);
4190        case F2FS_IOC_GET_COMPRESS_BLOCKS:
4191                return f2fs_get_compress_blocks(filp, arg);
4192        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4193                return f2fs_release_compress_blocks(filp, arg);
4194        case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4195                return f2fs_reserve_compress_blocks(filp, arg);
4196        case F2FS_IOC_SEC_TRIM_FILE:
4197                return f2fs_sec_trim_file(filp, arg);
4198        case F2FS_IOC_GET_COMPRESS_OPTION:
4199                return f2fs_ioc_get_compress_option(filp, arg);
4200        case F2FS_IOC_SET_COMPRESS_OPTION:
4201                return f2fs_ioc_set_compress_option(filp, arg);
4202        case F2FS_IOC_DECOMPRESS_FILE:
4203                return f2fs_ioc_decompress_file(filp, arg);
4204        case F2FS_IOC_COMPRESS_FILE:
4205                return f2fs_ioc_compress_file(filp, arg);
4206        default:
4207                return -ENOTTY;
4208        }
4209}
4210
4211long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4212{
4213        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4214                return -EIO;
4215        if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4216                return -ENOSPC;
4217
4218        return __f2fs_ioctl(filp, cmd, arg);
4219}
4220
4221static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4222{
4223        struct file *file = iocb->ki_filp;
4224        struct inode *inode = file_inode(file);
4225        int ret;
4226
4227        if (!f2fs_is_compress_backend_ready(inode))
4228                return -EOPNOTSUPP;
4229
4230        ret = generic_file_read_iter(iocb, iter);
4231
4232        if (ret > 0)
4233                f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4234
4235        return ret;
4236}
4237
4238static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4239{
4240        struct file *file = iocb->ki_filp;
4241        struct inode *inode = file_inode(file);
4242        ssize_t ret;
4243
4244        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4245                ret = -EIO;
4246                goto out;
4247        }
4248
4249        if (!f2fs_is_compress_backend_ready(inode)) {
4250                ret = -EOPNOTSUPP;
4251                goto out;
4252        }
4253
4254        if (iocb->ki_flags & IOCB_NOWAIT) {
4255                if (!inode_trylock(inode)) {
4256                        ret = -EAGAIN;
4257                        goto out;
4258                }
4259        } else {
4260                inode_lock(inode);
4261        }
4262
4263        if (unlikely(IS_IMMUTABLE(inode))) {
4264                ret = -EPERM;
4265                goto unlock;
4266        }
4267
4268        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4269                ret = -EPERM;
4270                goto unlock;
4271        }
4272
4273        ret = generic_write_checks(iocb, from);
4274        if (ret > 0) {
4275                bool preallocated = false;
4276                size_t target_size = 0;
4277                int err;
4278
4279                if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4280                        set_inode_flag(inode, FI_NO_PREALLOC);
4281
4282                if ((iocb->ki_flags & IOCB_NOWAIT)) {
4283                        if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4284                                                iov_iter_count(from)) ||
4285                                f2fs_has_inline_data(inode) ||
4286                                f2fs_force_buffered_io(inode, iocb, from)) {
4287                                clear_inode_flag(inode, FI_NO_PREALLOC);
4288                                inode_unlock(inode);
4289                                ret = -EAGAIN;
4290                                goto out;
4291                        }
4292                        goto write;
4293                }
4294
4295                if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4296                        goto write;
4297
4298                if (iocb->ki_flags & IOCB_DIRECT) {
4299                        /*
4300                         * Convert inline data for Direct I/O before entering
4301                         * f2fs_direct_IO().
4302                         */
4303                        err = f2fs_convert_inline_inode(inode);
4304                        if (err)
4305                                goto out_err;
4306                        /*
4307                         * If f2fs_force_buffered_io() is true, we have to
4308                         * allocate blocks all the time, since f2fs_direct_IO()
4309                         * will fall back to buffered I/O.
4310                         */
4311                        if (!f2fs_force_buffered_io(inode, iocb, from) &&
4312                                        f2fs_lfs_mode(F2FS_I_SB(inode)))
4313                                goto write;
4314                }
4315                preallocated = true;
4316                target_size = iocb->ki_pos + iov_iter_count(from);
4317
4318                err = f2fs_preallocate_blocks(iocb, from);
4319                if (err) {
4320out_err:
4321                        clear_inode_flag(inode, FI_NO_PREALLOC);
4322                        inode_unlock(inode);
4323                        ret = err;
4324                        goto out;
4325                }
4326write:
4327                ret = __generic_file_write_iter(iocb, from);
4328                clear_inode_flag(inode, FI_NO_PREALLOC);
4329
4330                /* if we couldn't write data, we should deallocate blocks. */
4331                if (preallocated && i_size_read(inode) < target_size) {
4332                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4333                        filemap_invalidate_lock(inode->i_mapping);
4334                        f2fs_truncate(inode);
4335                        filemap_invalidate_unlock(inode->i_mapping);
4336                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4337                }
4338
4339                if (ret > 0)
4340                        f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4341        }
4342unlock:
4343        inode_unlock(inode);
4344out:
4345        trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4346                                        iov_iter_count(from), ret);
4347        if (ret > 0)
4348                ret = generic_write_sync(iocb, ret);
4349        return ret;
4350}
4351
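/*
 * POSIX_FADV_SEQUENTIAL is handled here instead of by generic_fadvise():
 * the per-file readahead window is scaled by the seq_file_ra_mul tunable
 * and FMODE_RANDOM is cleared. Every other advice takes the generic path.
 */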
4352static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4353                int advice)
4354{
4355        struct inode *inode;
4356        struct address_space *mapping;
4357        struct backing_dev_info *bdi;
4358
4359        if (advice == POSIX_FADV_SEQUENTIAL) {
4360                inode = file_inode(filp);
4361                if (S_ISFIFO(inode->i_mode))
4362                        return -ESPIPE;
4363
4364                mapping = filp->f_mapping;
4365                if (!mapping || len < 0)
4366                        return -EINVAL;
4367
4368                bdi = inode_to_bdi(mapping->host);
4369                filp->f_ra.ra_pages = bdi->ra_pages *
4370                        F2FS_I_SB(inode)->seq_file_ra_mul;
4371                spin_lock(&filp->f_lock);
4372                filp->f_mode &= ~FMODE_RANDOM;
4373                spin_unlock(&filp->f_lock);
4374                return 0;
4375        }
4376
4377        return generic_fadvise(filp, offset, len, advice);
4378}
4379
4380#ifdef CONFIG_COMPAT
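/*
 * 32-bit userspace may align and pad 64-bit fields differently, so the
 * range-based ioctls get compat twins: the same layout spelled with
 * compat_u64, a distinct ioctl number, and a field-by-field copy into
 * the native struct before calling the shared helper.
 */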
4381struct compat_f2fs_gc_range {
4382        u32 sync;
4383        compat_u64 start;
4384        compat_u64 len;
4385};
4386#define F2FS_IOC32_GARBAGE_COLLECT_RANGE        _IOW(F2FS_IOCTL_MAGIC, 11,\
4387                                                struct compat_f2fs_gc_range)
4388
4389static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4390{
4391        struct compat_f2fs_gc_range __user *urange;
4392        struct f2fs_gc_range range;
4393        int err;
4394
4395        urange = compat_ptr(arg);
4396        err = get_user(range.sync, &urange->sync);
4397        err |= get_user(range.start, &urange->start);
4398        err |= get_user(range.len, &urange->len);
4399        if (err)
4400                return -EFAULT;
4401
4402        return __f2fs_ioc_gc_range(file, &range);
4403}
4404
4405struct compat_f2fs_move_range {
4406        u32 dst_fd;
4407        compat_u64 pos_in;
4408        compat_u64 pos_out;
4409        compat_u64 len;
4410};
4411#define F2FS_IOC32_MOVE_RANGE           _IOWR(F2FS_IOCTL_MAGIC, 9,      \
4412                                        struct compat_f2fs_move_range)
4413
4414static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4415{
4416        struct compat_f2fs_move_range __user *urange;
4417        struct f2fs_move_range range;
4418        int err;
4419
4420        urange = compat_ptr(arg);
4421        err = get_user(range.dst_fd, &urange->dst_fd);
4422        err |= get_user(range.pos_in, &urange->pos_in);
4423        err |= get_user(range.pos_out, &urange->pos_out);
4424        err |= get_user(range.len, &urange->len);
4425        if (err)
4426                return -EFAULT;
4427
4428        return __f2fs_ioc_move_range(file, &range);
4429}
4430
4431long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4432{
4433        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4434                return -EIO;
4435        if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4436                return -ENOSPC;
4437
4438        switch (cmd) {
4439        case FS_IOC32_GETVERSION:
4440                cmd = FS_IOC_GETVERSION;
4441                break;
4442        case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4443                return f2fs_compat_ioc_gc_range(file, arg);
4444        case F2FS_IOC32_MOVE_RANGE:
4445                return f2fs_compat_ioc_move_range(file, arg);
4446        case F2FS_IOC_START_ATOMIC_WRITE:
4447        case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4448        case F2FS_IOC_START_VOLATILE_WRITE:
4449        case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4450        case F2FS_IOC_ABORT_VOLATILE_WRITE:
4451        case F2FS_IOC_SHUTDOWN:
4452        case FITRIM:
4453        case FS_IOC_SET_ENCRYPTION_POLICY:
4454        case FS_IOC_GET_ENCRYPTION_PWSALT:
4455        case FS_IOC_GET_ENCRYPTION_POLICY:
4456        case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4457        case FS_IOC_ADD_ENCRYPTION_KEY:
4458        case FS_IOC_REMOVE_ENCRYPTION_KEY:
4459        case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4460        case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4461        case FS_IOC_GET_ENCRYPTION_NONCE:
4462        case F2FS_IOC_GARBAGE_COLLECT:
4463        case F2FS_IOC_WRITE_CHECKPOINT:
4464        case F2FS_IOC_DEFRAGMENT:
4465        case F2FS_IOC_FLUSH_DEVICE:
4466        case F2FS_IOC_GET_FEATURES:
4467        case F2FS_IOC_GET_PIN_FILE:
4468        case F2FS_IOC_SET_PIN_FILE:
4469        case F2FS_IOC_PRECACHE_EXTENTS:
4470        case F2FS_IOC_RESIZE_FS:
4471        case FS_IOC_ENABLE_VERITY:
4472        case FS_IOC_MEASURE_VERITY:
4473        case FS_IOC_READ_VERITY_METADATA:
4474        case FS_IOC_GETFSLABEL:
4475        case FS_IOC_SETFSLABEL:
4476        case F2FS_IOC_GET_COMPRESS_BLOCKS:
4477        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4478        case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4479        case F2FS_IOC_SEC_TRIM_FILE:
4480        case F2FS_IOC_GET_COMPRESS_OPTION:
4481        case F2FS_IOC_SET_COMPRESS_OPTION:
4482        case F2FS_IOC_DECOMPRESS_FILE:
4483        case F2FS_IOC_COMPRESS_FILE:
4484                break;
4485        default:
4486                return -ENOIOCTLCMD;
4487        }
4488        return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4489}
4490#endif
4491
4492const struct file_operations f2fs_file_operations = {
4493        .llseek         = f2fs_llseek,
4494        .read_iter      = f2fs_file_read_iter,
4495        .write_iter     = f2fs_file_write_iter,
4496        .open           = f2fs_file_open,
4497        .release        = f2fs_release_file,
4498        .mmap           = f2fs_file_mmap,
4499        .flush          = f2fs_file_flush,
4500        .fsync          = f2fs_sync_file,
4501        .fallocate      = f2fs_fallocate,
4502        .unlocked_ioctl = f2fs_ioctl,
4503#ifdef CONFIG_COMPAT
4504        .compat_ioctl   = f2fs_compat_ioctl,
4505#endif
4506        .splice_read    = generic_file_splice_read,
4507        .splice_write   = iter_file_splice_write,
4508        .fadvise        = f2fs_file_fadvise,
4509};
4510