linux/fs/f2fs/file.c
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>

static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                                                struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        block_t old_blk_addr;
        struct dnode_of_data dn;
        int err, ilock;

        f2fs_balance_fs(sbi);

        sb_start_pagefault(inode->i_sb);

        /* block allocation */
        ilock = mutex_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
        if (err) {
                mutex_unlock_op(sbi, ilock);
                goto out;
        }

        old_blk_addr = dn.data_blkaddr;

        if (old_blk_addr == NULL_ADDR) {
                err = reserve_new_block(&dn);
                if (err) {
                        f2fs_put_dnode(&dn);
                        mutex_unlock_op(sbi, ilock);
                        goto out;
                }
        }
        f2fs_put_dnode(&dn);
        mutex_unlock_op(sbi, ilock);

        file_update_time(vma->vm_file);
        lock_page(page);
        if (page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
                        !PageUptodate(page)) {
                unlock_page(page);
                err = -EFAULT;
                goto out;
        }

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto mapped;

        /* page is wholly or partially inside EOF */
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
                unsigned offset;
                offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
                zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        }
        set_page_dirty(page);
        SetPageUptodate(page);

mapped:
        /* fill the page */
        wait_on_page_writeback(page);
out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
        struct dentry *dentry;

        inode = igrab(inode);
        dentry = d_find_any_alias(inode);
        iput(inode);
        if (!dentry)
                return 0;

        if (update_dent_inode(inode, &dentry->d_name)) {
                dput(dentry);
                return 0;
        }

        *pino = parent_ino(dentry);
        dput(dentry);
        return 1;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        int ret = 0;
        bool need_cp = false;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };

        if (f2fs_readonly(inode->i_sb))
                return 0;

        trace_f2fs_sync_file_enter(inode);
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret) {
                trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
                return ret;
        }

        /* guarantee free sections for fsync */
        f2fs_balance_fs(sbi);

        mutex_lock(&inode->i_mutex);

        /*
         * Data written by both fdatasync() and fsync() must be recoverable
         * after a sudden power-off.
         */
        if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
                need_cp = true;
        else if (file_wrong_pino(inode))
                need_cp = true;
        else if (!space_for_roll_forward(sbi))
                need_cp = true;
        else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
                need_cp = true;
        else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
                need_cp = true;

        if (need_cp) {
                nid_t pino;

                F2FS_I(inode)->xattr_ver = 0;

                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);
                if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
                                        get_parent_ino(inode, &pino)) {
                        F2FS_I(inode)->i_pino = pino;
                        file_got_pino(inode);
                        mark_inode_dirty_sync(inode);
                        ret = f2fs_write_inode(inode, NULL);
                        if (ret)
                                goto out;
                }
        } else {
                /* if there is no written node page, write its inode page */
                while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
                        mark_inode_dirty_sync(inode);
                        ret = f2fs_write_inode(inode, NULL);
                        if (ret)
                                goto out;
                }
                filemap_fdatawait_range(sbi->node_inode->i_mapping,
                                                        0, LONG_MAX);
                ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
        }
out:
        mutex_unlock(&inode->i_mutex);
        trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
        return ret;
}
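
/*
 * Illustration only, not part of the original file: a minimal user-space
 * sketch of the path that ends in f2fs_sync_file().  An application writes
 * some data and then calls fdatasync()/fsync(); whether f2fs can recover the
 * file by roll-forward or has to issue a full checkpoint is decided by the
 * need_cp checks above.  The mount point and file name are made up for the
 * example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char buf[] = "hello f2fs\n";
        /* hypothetical file on an f2fs mount */
        int fd = open("/mnt/f2fs/testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }
        if (write(fd, buf, strlen(buf)) < 0)
                perror("write");
        /* ends up in f2fs_sync_file() with datasync == 1 */
        if (fdatasync(fd) < 0)
                perror("fdatasync");
        close(fd);
        return 0;
}
#endif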

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;
        return 0;
}
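
/*
 * Illustration only, not part of the original file: a user-space sketch of
 * the mmap() write path.  The first store into a shared, writable mapping
 * faults and reaches f2fs_vm_page_mkwrite() via the vm_ops installed in
 * f2fs_file_mmap() above, which reserves a block before the page is dirtied.
 * The file path is made up for the example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/f2fs/mapped", O_CREAT | O_RDWR, 0644);
        char *p;

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }
        if (ftruncate(fd, 4096) < 0)            /* make sure one page exists */
                perror("ftruncate");
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return EXIT_FAILURE;
        }
        memcpy(p, "mkwrite", 7);        /* first write fault -> page_mkwrite */
        msync(p, 4096, MS_SYNC);        /* write the dirtied page back */
        munmap(p, 4096);
        close(fd);
        return 0;
}
#endif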

int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
        int nr_free = 0, ofs = dn->ofs_in_node;
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct f2fs_node *raw_node;
        __le32 *addr;

        raw_node = F2FS_NODE(dn->node_page);
        addr = blkaddr_in_node(raw_node) + ofs;

        for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
                block_t blkaddr = le32_to_cpu(*addr);
                if (blkaddr == NULL_ADDR)
                        continue;

                update_extent_cache(NULL_ADDR, dn);
                invalidate_blocks(sbi, blkaddr);
                nr_free++;
        }
        if (nr_free) {
                dec_valid_block_count(sbi, dn->inode, nr_free);
                set_page_dirty(dn->node_page);
                sync_inode_page(dn);
        }
        dn->ofs_in_node = ofs;

        trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                         dn->ofs_in_node, nr_free);
        return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
        truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static void truncate_partial_data_page(struct inode *inode, u64 from)
{
        unsigned offset = from & (PAGE_CACHE_SIZE - 1);
        struct page *page;

        if (!offset)
                return;

        page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
        if (IS_ERR(page))
                return;

        lock_page(page);
        if (page->mapping != inode->i_mapping) {
                f2fs_put_page(page, 1);
                return;
        }
        wait_on_page_writeback(page);
        zero_user(page, offset, PAGE_CACHE_SIZE - offset);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
}

static int truncate_blocks(struct inode *inode, u64 from)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct dnode_of_data dn;
        pgoff_t free_from;
        int count = 0, ilock = -1;
        int err;

        trace_f2fs_truncate_blocks_enter(inode, from);

        free_from = (pgoff_t)
                        ((from + blocksize - 1) >> (sbi->log_blocksize));

        ilock = mutex_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
                mutex_unlock_op(sbi, ilock);
                trace_f2fs_truncate_blocks_exit(inode, err);
                return err;
        }

        if (IS_INODE(dn.node_page))
                count = ADDRS_PER_INODE(F2FS_I(inode));
        else
                count = ADDRS_PER_BLOCK;

        count -= dn.ofs_in_node;
        BUG_ON(count < 0);

        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                truncate_data_blocks_range(&dn, count);
                free_from += count;
        }

        f2fs_put_dnode(&dn);
free_next:
        err = truncate_inode_blocks(inode, free_from);
        mutex_unlock_op(sbi, ilock);

        /* lastly zero out the first data page */
        truncate_partial_data_page(inode, from);

        trace_f2fs_truncate_blocks_exit(inode, err);
        return err;
}

void f2fs_truncate(struct inode *inode)
{
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
                return;

        trace_f2fs_truncate(inode);

        if (!truncate_blocks(inode, i_size_read(inode))) {
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
        }
}

int f2fs_getattr(struct vfsmount *mnt,
                         struct dentry *dentry, struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        generic_fillattr(inode, stat);
        stat->blocks <<= 3;
        return 0;
}
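
/*
 * Illustration only, not part of the original file: f2fs accounts
 * inode->i_blocks in filesystem blocks (4 KiB here), while stat(2) reports
 * st_blocks in 512-byte units, which appears to be why f2fs_getattr() shifts
 * the count left by three.  A user-space sketch that reads the value back
 * (the path is made up):
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        if (stat("/mnt/f2fs/testfile", &st) == 0)
                printf("size=%lld bytes, blocks=%lld (512-byte units)\n",
                        (long long)st.st_size, (long long)st.st_blocks);
        return 0;
}
#endif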

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int ia_valid = attr->ia_valid;

        if (ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = timespec_trunc(attr->ia_atime,
                                                inode->i_sb->s_time_gran);
        if (ia_valid & ATTR_MTIME)
                inode->i_mtime = timespec_trunc(attr->ia_mtime,
                                                inode->i_sb->s_time_gran);
        if (ia_valid & ATTR_CTIME)
                inode->i_ctime = timespec_trunc(attr->ia_ctime,
                                                inode->i_sb->s_time_gran);
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                set_acl_inode(fi, mode);
        }
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct f2fs_inode_info *fi = F2FS_I(inode);
        int err;

        err = inode_change_ok(inode, attr);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) &&
                        attr->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attr->ia_size);
                f2fs_truncate(inode);
                f2fs_balance_fs(F2FS_SB(inode->i_sb));
        }

        __setattr_copy(inode, attr);

        if (attr->ia_valid & ATTR_MODE) {
                err = f2fs_acl_chmod(inode);
                if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
                        inode->i_mode = fi->i_acl_mode;
                        clear_inode_flag(fi, FI_ACL_MODE);
                }
        }

        mark_inode_dirty(inode);
        return err;
}

const struct inode_operations f2fs_file_inode_operations = {
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = f2fs_listxattr,
        .removexattr    = generic_removexattr,
#endif
};

static void fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
        int ilock;

        if (!len)
                return;

        f2fs_balance_fs(sbi);

        ilock = mutex_lock_op(sbi);
        page = get_new_data_page(inode, NULL, index, false);
        mutex_unlock_op(sbi, ilock);

        if (!IS_ERR(page)) {
                wait_on_page_writeback(page);
                zero_user(page, start, len);
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
        pgoff_t index;
        int err;

        for (index = pg_start; index < pg_end; index++) {
                struct dnode_of_data dn;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
                if (err) {
                        if (err == -ENOENT)
                                continue;
                        return err;
                }

                if (dn.data_blkaddr != NULL_ADDR)
                        truncate_data_blocks_range(&dn, 1);
                f2fs_put_dnode(&dn);
        }
        return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
        int ret = 0;

        pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

        off_start = offset & (PAGE_CACHE_SIZE - 1);
        off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

        if (pg_start == pg_end) {
                fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
        } else {
                if (off_start)
                        fill_zero(inode, pg_start++, off_start,
                                        PAGE_CACHE_SIZE - off_start);
                if (off_end)
                        fill_zero(inode, pg_end, 0, off_end);

                if (pg_start < pg_end) {
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
                        int ilock;

                        f2fs_balance_fs(sbi);

                        blk_start = pg_start << PAGE_CACHE_SHIFT;
                        blk_end = pg_end << PAGE_CACHE_SHIFT;
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);

                        ilock = mutex_lock_op(sbi);
                        ret = truncate_hole(inode, pg_start, pg_end);
                        mutex_unlock_op(sbi, ilock);
                }
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                i_size_read(inode) <= (offset + len)) {
                i_size_write(inode, offset);
                mark_inode_dirty(inode);
        }

        return ret;
}
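
/*
 * Illustration only, not part of the original file: the arithmetic used by
 * punch_hole() above, worked through for one concrete request.  With 4 KiB
 * pages, punching offset=1000 len=10000 zeroes the tail of page 0 and the
 * head of page 2, and frees page 1 entirely.  The sketch below redoes the
 * same shifts and masks in user space; EX_PAGE_* are stand-ins for the
 * kernel's PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE (1ULL << EX_PAGE_SHIFT)

int main(void)
{
        unsigned long long offset = 1000, len = 10000;
        unsigned long long pg_start = offset >> EX_PAGE_SHIFT;
        unsigned long long pg_end = (offset + len) >> EX_PAGE_SHIFT;
        unsigned long long off_start = offset & (EX_PAGE_SIZE - 1);
        unsigned long long off_end = (offset + len) & (EX_PAGE_SIZE - 1);

        printf("pg_start=%llu off_start=%llu\n", pg_start, off_start);
        printf("pg_end=%llu off_end=%llu\n", pg_end, off_end);
        /* prints: pg_start=0 off_start=1000, pg_end=2 off_end=2808 */
        return 0;
}
#endif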

static int expand_inode_data(struct inode *inode, loff_t offset,
                                        loff_t len, int mode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        pgoff_t index, pg_start, pg_end;
        loff_t new_size = i_size_read(inode);
        loff_t off_start, off_end;
        int ret = 0;

        ret = inode_newsize_ok(inode, (len + offset));
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

        off_start = offset & (PAGE_CACHE_SIZE - 1);
        off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

        for (index = pg_start; index <= pg_end; index++) {
                struct dnode_of_data dn;
                int ilock;

                ilock = mutex_lock_op(sbi);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
                if (ret) {
                        mutex_unlock_op(sbi, ilock);
                        break;
                }

                if (dn.data_blkaddr == NULL_ADDR) {
                        ret = reserve_new_block(&dn);
                        if (ret) {
                                f2fs_put_dnode(&dn);
                                mutex_unlock_op(sbi, ilock);
                                break;
                        }
                }
                f2fs_put_dnode(&dn);
                mutex_unlock_op(sbi, ilock);

                if (pg_start == pg_end)
                        new_size = offset + len;
                else if (index == pg_start && off_start)
                        new_size = (index + 1) << PAGE_CACHE_SHIFT;
                else if (index == pg_end)
                        new_size = (index << PAGE_CACHE_SHIFT) + off_end;
                else
                        new_size += PAGE_CACHE_SIZE;
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                i_size_read(inode) < new_size) {
                i_size_write(inode, new_size);
                mark_inode_dirty(inode);
        }

        return ret;
}

static long f2fs_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(file);
        long ret;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                ret = punch_hole(inode, offset, len, mode);
        else
                ret = expand_inode_data(inode, offset, len, mode);

        if (!ret) {
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
        }
        trace_f2fs_fallocate(inode, mode, offset, len, ret);
        return ret;
}
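
/*
 * Illustration only, not part of the original file: a user-space sketch of
 * both fallocate() modes handled above.  Mode 0 goes through
 * expand_inode_data() and preallocates blocks; FALLOC_FL_PUNCH_HOLE (which
 * the VFS requires to be combined with FALLOC_FL_KEEP_SIZE) goes through
 * punch_hole().  The file path is made up for the example.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
        int fd = open("/mnt/f2fs/prealloc", O_CREAT | O_RDWR, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* preallocate 1 MiB: expand_inode_data() */
        if (fallocate(fd, 0, 0, 1 << 20) < 0)
                perror("fallocate");
        /* punch a 64 KiB hole at 128 KiB: punch_hole() */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                        128 << 10, 64 << 10) < 0)
                perror("fallocate punch");
        close(fd);
        return 0;
}
#endif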

#define F2FS_REG_FLMASK         (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK       (FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
        if (S_ISDIR(mode))
                return flags;
        else if (S_ISREG(mode))
                return flags & F2FS_REG_FLMASK;
        else
                return flags & F2FS_OTHER_FLMASK;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int flags;
        int ret;

        switch (cmd) {
        case F2FS_IOC_GETFLAGS:
                flags = fi->i_flags & FS_FL_USER_VISIBLE;
                return put_user(flags, (int __user *) arg);
        case F2FS_IOC_SETFLAGS:
        {
                unsigned int oldflags;

                ret = mnt_want_write_file(filp);
                if (ret)
                        return ret;

                if (!inode_owner_or_capable(inode)) {
                        ret = -EACCES;
                        goto out;
                }

                if (get_user(flags, (int __user *) arg)) {
                        ret = -EFAULT;
                        goto out;
                }

                flags = f2fs_mask_flags(inode->i_mode, flags);

                mutex_lock(&inode->i_mutex);

                oldflags = fi->i_flags;

                if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
                        if (!capable(CAP_LINUX_IMMUTABLE)) {
                                mutex_unlock(&inode->i_mutex);
                                ret = -EPERM;
                                goto out;
                        }
                }

                flags = flags & FS_FL_USER_MODIFIABLE;
                flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
                fi->i_flags = flags;
                mutex_unlock(&inode->i_mutex);

                f2fs_set_inode_flags(inode);
                inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
out:
                mnt_drop_write_file(filp);
                return ret;
        }
        default:
                return -ENOTTY;
        }
}
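
/*
 * Illustration only, not part of the original file: a user-space sketch of
 * the two ioctls handled above.  F2FS_IOC_GETFLAGS/F2FS_IOC_SETFLAGS are the
 * generic inode-flag ioctls used by lsattr(1)/chattr(1); the sketch issues
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS from <linux/fs.h>, which are assumed to
 * match the values this file accepts.  The file path is made up.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
        int flags = 0;
        int fd = open("/mnt/f2fs/testfile", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* f2fs_ioctl() copies fi->i_flags & FS_FL_USER_VISIBLE back as an int */
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                perror("FS_IOC_GETFLAGS");
        flags |= FS_NOATIME_FL;                 /* equivalent of `chattr +A` */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
                perror("FS_IOC_SETFLAGS");
        close(fd);
        return 0;
}
#endif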

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case F2FS_IOC32_GETFLAGS:
                cmd = F2FS_IOC_GETFLAGS;
                break;
        case F2FS_IOC32_SETFLAGS:
                cmd = F2FS_IOC_SETFLAGS;
                break;
        default:
                return -ENOIOCTLCMD;
        }
        return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .open           = generic_file_open,
        .mmap           = f2fs_file_mmap,
        .fsync          = f2fs_sync_file,
        .fallocate      = f2fs_fallocate,
        .unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = f2fs_compat_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
};