linux/fs/f2fs/inode.c
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"

#include <trace/events/f2fs.h>

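/*
 * Mark the VFS inode dirty for sync-style writeback, unless
 * f2fs_inode_dirtied() reports that f2fs already tracks it as dirty.
 */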
void f2fs_mark_inode_dirty_sync(struct inode *inode)
{
        if (f2fs_inode_dirtied(inode))
                return;
        mark_inode_dirty_sync(inode);
}

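/*
 * Propagate the FS_*_FL flags kept in the f2fs inode info into the
 * generic VFS inode flags (S_SYNC, S_APPEND, S_IMMUTABLE, ...).
 */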
void f2fs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = F2FS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        inode_set_flags(inode, new_fl,
                        S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
        f2fs_mark_inode_dirty_sync(inode);
}

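/*
 * For device, FIFO and socket inodes, decode i_rdev from the on-disk
 * inode: the old encoding lives in i_addr[0], the new one in i_addr[1].
 */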
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                if (ri->i_addr[0])
                        inode->i_rdev =
                                old_decode_dev(le32_to_cpu(ri->i_addr[0]));
                else
                        inode->i_rdev =
                                new_decode_dev(le32_to_cpu(ri->i_addr[1]));
        }
}

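/*
 * Return true if the inode's first data block already has an on-disk
 * address, i.e. it is neither NEW_ADDR nor NULL_ADDR.
 */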
static bool __written_first_block(struct f2fs_inode *ri)
{
        block_t addr = le32_to_cpu(ri->i_addr[0]);

        if (addr != NEW_ADDR && addr != NULL_ADDR)
                return true;
        return false;
}

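/*
 * Encode i_rdev of a char/block device inode into the on-disk inode,
 * mirroring the layout that __get_inode_rdev() expects.
 */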
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        ri->i_addr[0] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        ri->i_addr[1] = 0;
                } else {
                        ri->i_addr[0] = 0;
                        ri->i_addr[1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        ri->i_addr[2] = 0;
                }
        }
}

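/*
 * If the inline data area of the inode page contains any non-zero word,
 * restore FI_DATA_EXIST and the raw inline flag, and dirty the page so
 * the recovered state is written back.
 */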
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
        void *inline_data = inline_data_addr(ipage);
        __le32 *start = inline_data;
        __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);

        while (start < end) {
                if (*start++) {
                        f2fs_wait_on_page_writeback(ipage, NODE, true);

                        set_inode_flag(inode, FI_DATA_EXIST);
                        set_raw_inline(inode, F2FS_INODE(ipage));
                        set_page_dirty(ipage);
                        return;
                }
        }
        return;
}

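/*
 * Read the raw inode from its node page and populate the VFS inode and
 * f2fs_inode_info fields from it.
 */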
static int do_read_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct page *node_page;
        struct f2fs_inode *ri;

        /* Check if ino is within scope */
        if (check_nid_range(sbi, inode->i_ino)) {
                f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
                         (unsigned long) inode->i_ino);
                WARN_ON(1);
                return -EINVAL;
        }

        node_page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        ri = F2FS_INODE(node_page);

        inode->i_mode = le16_to_cpu(ri->i_mode);
        i_uid_write(inode, le32_to_cpu(ri->i_uid));
        i_gid_write(inode, le32_to_cpu(ri->i_gid));
        set_nlink(inode, le32_to_cpu(ri->i_links));
        inode->i_size = le64_to_cpu(ri->i_size);
        inode->i_blocks = le64_to_cpu(ri->i_blocks);

        inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
        inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);

        fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
        fi->i_flags = le32_to_cpu(ri->i_flags);
        fi->flags = 0;
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
        fi->i_dir_level = ri->i_dir_level;

        if (f2fs_init_extent_tree(inode, &ri->i_ext))
                set_page_dirty(node_page);

        get_inline_info(inode, ri);

        /* check whether inline data really exists */
        if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
                __recover_inline_status(inode, node_page);

        /* get rdev by using inline_info */
        __get_inode_rdev(inode, ri);

        if (__written_first_block(ri))
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

        if (!need_inode_block_update(sbi, inode->i_ino))
                fi->last_disk_size = inode->i_size;

        f2fs_put_page(node_page, 1);

        stat_inc_inline_xattr(inode);
        stat_inc_inline_inode(inode);
        stat_inc_inline_dir(inode);

        return 0;
}

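/*
 * Look up the inode for the given ino, reading it from disk if it is not
 * cached, and wire up the inode, file and address-space operations
 * according to the inode type.
 */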
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        int ret = 0;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        if (!(inode->i_state & I_NEW)) {
                trace_f2fs_iget(inode);
                return inode;
        }
        if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
                goto make_now;

        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
        } else if (ino == F2FS_META_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_meta_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
        } else if (S_ISREG(inode->i_mode)) {
                inode->i_op = &f2fs_file_inode_operations;
                inode->i_fop = &f2fs_file_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &f2fs_dir_inode_operations;
                inode->i_fop = &f2fs_dir_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
        } else if (S_ISLNK(inode->i_mode)) {
                if (f2fs_encrypted_inode(inode))
                        inode->i_op = &f2fs_encrypted_symlink_inode_operations;
                else
                        inode->i_op = &f2fs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                inode->i_op = &f2fs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
        } else {
                ret = -EIO;
                goto bad_inode;
        }
        unlock_new_inode(inode);
        trace_f2fs_iget(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        trace_f2fs_iget_exit(inode, ret);
        return ERR_PTR(ret);
}

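/*
 * Same as f2fs_iget(), but retry after a short congestion wait as long
 * as the lookup fails with -ENOMEM.
 */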
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;
retry:
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode)) {
                if (PTR_ERR(inode) == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
        }
        return inode;
}

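/*
 * Copy the in-memory inode state back into the raw inode on the given
 * node page and mark the page dirty; returns the result of
 * set_page_dirty().
 */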
int update_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_inode *ri;

        f2fs_inode_synced(inode);

        f2fs_wait_on_page_writeback(node_page, NODE, true);

        ri = F2FS_INODE(node_page);

        ri->i_mode = cpu_to_le16(inode->i_mode);
        ri->i_advise = F2FS_I(inode)->i_advise;
        ri->i_uid = cpu_to_le32(i_uid_read(inode));
        ri->i_gid = cpu_to_le32(i_gid_read(inode));
        ri->i_links = cpu_to_le32(inode->i_nlink);
        ri->i_size = cpu_to_le64(i_size_read(inode));
        ri->i_blocks = cpu_to_le64(inode->i_blocks);

        if (F2FS_I(inode)->extent_tree)
                set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
                                                        &ri->i_ext);
        else
                memset(&ri->i_ext, 0, sizeof(ri->i_ext));
        set_raw_inline(inode, ri);

        ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
        ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
        ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
        ri->i_dir_level = F2FS_I(inode)->i_dir_level;

        __set_inode_rdev(inode, ri);
        set_cold_node(inode, node_page);

        /* deleted inode */
        if (inode->i_nlink == 0)
                clear_inline_node(node_page);

        return set_page_dirty(node_page);
}

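/*
 * Fetch the inode's node page and write the inode back into it.  A page
 * lookup failing with -ENOMEM is retried; failures other than -ENOENT
 * stop checkpointing before the inode is treated as synced.
 */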
int update_inode_page(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *node_page;
        int ret = 0;
retry:
        node_page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page)) {
                int err = PTR_ERR(node_page);
                if (err == -ENOMEM) {
                        cond_resched();
                        goto retry;
                } else if (err != -ENOENT) {
                        f2fs_stop_checkpoint(sbi, false);
                }
                f2fs_inode_synced(inode);
                return 0;
        }
        ret = update_inode(inode, node_page);
        f2fs_put_page(node_page, 1);
        return ret;
}

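/*
 * ->write_inode callback: write a dirty inode back to its node page,
 * skipping the node/meta inodes and inodes without FI_DIRTY_INODE set.
 */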
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;

        if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
                return 0;

        /*
         * We need to balance the filesystem here to avoid producing dirty
         * node pages during urgent cleaning, when we are running out of
         * free sections.
         */
        if (update_inode_page(inode))
                f2fs_balance_fs(sbi, true);
        return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t xnid = F2FS_I(inode)->i_xattr_nid;
        int err = 0;

        /* any remaining atomic pages should be discarded */
        if (f2fs_is_atomic_file(inode))
                drop_inmem_pages(inode);

        trace_f2fs_evict_inode(inode);
        truncate_inode_pages_final(&inode->i_data);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                goto out_clear;

        f2fs_bug_on(sbi, get_dirty_pages(inode));
        remove_dirty_inode(inode);

        f2fs_destroy_extent_tree(inode);

        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_EVICT_INODE))
                goto no_delete;
#endif

        sb_start_intwrite(inode->i_sb);
        set_inode_flag(inode, FI_NO_ALLOC);
        i_size_write(inode, 0);
retry:
        if (F2FS_HAS_BLOCKS(inode))
                err = f2fs_truncate(inode);

        if (!err) {
                f2fs_lock_op(sbi);
                err = remove_inode_page(inode);
                f2fs_unlock_op(sbi);
        }

        /* give more chances in the ENOMEM case */
        if (err == -ENOMEM) {
                err = 0;
                goto retry;
        }

        if (err)
                update_inode_page(inode);
        sb_end_intwrite(inode->i_sb);
no_delete:
        stat_dec_inline_xattr(inode);
        stat_dec_inline_dir(inode);
        stat_dec_inline_inode(inode);

        invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
        if (xnid)
                invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
        if (is_inode_flag_set(inode, FI_APPEND_WRITE))
                add_ino_entry(sbi, inode->i_ino, APPEND_INO);
        if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
                add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
        if (is_inode_flag_set(inode, FI_FREE_NID)) {
                alloc_nid_failed(sbi, inode->i_ino);
                clear_inode_flag(inode, FI_FREE_NID);
        }
        f2fs_bug_on(sbi, err &&
                !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
        fscrypt_put_encryption_info(inode, NULL);
        clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct node_info ni;

        /* don't make a bad inode, since it becomes a regular file. */
        unlock_new_inode(inode);

        /*
         * Note: we should add the inode to the orphan list before
         * f2fs_unlock_op(), so we do not lose this orphan if a checkpoint
         * is followed by a sudden power-off.
         */
        get_node_info(sbi, inode->i_ino, &ni);

        if (ni.blk_addr != NULL_ADDR) {
                int err = acquire_orphan_inode(sbi);
                if (err) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_msg(sbi->sb, KERN_WARNING,
                                "Too many orphan inodes, run fsck to fix.");
                } else {
                        add_orphan_inode(inode);
                }
                alloc_nid_done(sbi, inode->i_ino);
        } else {
                set_inode_flag(inode, FI_FREE_NID);
        }

        f2fs_unlock_op(sbi);

        /* iput will drop the inode object */
        iput(inode);
}