linux/fs/f2fs/inode.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

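/*
 * Propagate dirty state to the VFS, unless the inode is still being
 * initialized or f2fs already tracks it as dirty.
 */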
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
        if (is_inode_flag_set(inode, FI_NEW_INODE))
                return;

        if (f2fs_inode_dirtied(inode, sync))
                return;

        mark_inode_dirty_sync(inode);
}

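/* Translate on-disk F2FS_*_FL flags into in-core VFS S_* flags. */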
void f2fs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = F2FS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & F2FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & F2FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & F2FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & F2FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & F2FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        if (file_is_encrypt(inode))
                new_fl |= S_ENCRYPTED;
        if (file_is_verity(inode))
                new_fl |= S_VERITY;
        if (flags & F2FS_CASEFOLD_FL)
                new_fl |= S_CASEFOLD;
        inode_set_flags(inode, new_fl,
                        S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
                        S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

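/*
 * Decode the device number of a special inode from the raw inode: old
 * 16-bit encodings live in the first i_addr slot, new 32-bit ones in the
 * second, both offset by the extra attribute size.
 */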
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
        int extra_size = get_extra_isize(inode);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                if (ri->i_addr[extra_size])
                        inode->i_rdev = old_decode_dev(
                                le32_to_cpu(ri->i_addr[extra_size]));
                else
                        inode->i_rdev = new_decode_dev(
                                le32_to_cpu(ri->i_addr[extra_size + 1]));
        }
}

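/*
 * Check whether the first data block has been written.  Returns 0 if it
 * has, 1 if it has not, and -EFSCORRUPTED if the recorded address falls
 * outside the valid range.
 */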
static int __written_first_block(struct f2fs_sb_info *sbi,
                                        struct f2fs_inode *ri)
{
        block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

        if (!__is_valid_data_blkaddr(addr))
                return 1;
        if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
                return -EFSCORRUPTED;
        return 0;
}

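/* Encode the device number of a special inode into the raw inode. */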
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
        int extra_size = get_extra_isize(inode);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        ri->i_addr[extra_size] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        ri->i_addr[extra_size + 1] = 0;
                } else {
                        ri->i_addr[extra_size] = 0;
                        ri->i_addr[extra_size + 1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        ri->i_addr[extra_size + 2] = 0;
                }
        }
}

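/*
 * If any word of the inline area is non-zero, the inode really carries
 * inline data; re-set FI_DATA_EXIST and dirty the inode page accordingly.
 */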
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
        void *inline_data = inline_data_addr(inode, ipage);
        __le32 *start = inline_data;
        __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

        while (start < end) {
                if (*start++) {
                        f2fs_wait_on_page_writeback(ipage, NODE, true, true);

                        set_inode_flag(inode, FI_DATA_EXIST);
                        set_raw_inline(inode, F2FS_INODE(ipage));
                        set_page_dirty(ipage);
                        return;
                }
        }
}

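/* Check whether this inode page is able to carry an inode checksum. */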
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_inode *ri = &F2FS_NODE(page)->i;

        if (!f2fs_sb_has_inode_chksum(sbi))
                return false;

        if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
                return false;

        if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
                                i_inode_checksum))
                return false;

        return true;
}

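/*
 * Compute the inode checksum: seed with ino and i_generation, then
 * checksum the raw inode while treating the checksum field itself as zero.
 */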
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_node *node = F2FS_NODE(page);
        struct f2fs_inode *ri = &node->i;
        __le32 ino = node->footer.ino;
        __le32 gen = ri->i_generation;
        __u32 chksum, chksum_seed;
        __u32 dummy_cs = 0;
        unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
        unsigned int cs_size = sizeof(dummy_cs);

        chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
                                                        sizeof(ino));
        chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

        chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
        chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
        offset += cs_size;
        chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
                                                F2FS_BLKSIZE - offset);
        return chksum;
}

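/*
 * Verify the on-disk inode checksum.  Dirty and writeback pages may be
 * in flux, so they are only verified when CONFIG_F2FS_CHECK_FS is set.
 */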
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_inode *ri;
        __u32 provided, calculated;

        if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
                return true;

#ifdef CONFIG_F2FS_CHECK_FS
        if (!f2fs_enable_inode_chksum(sbi, page))
#else
        if (!f2fs_enable_inode_chksum(sbi, page) ||
                        PageDirty(page) || PageWriteback(page))
#endif
                return true;

        ri = &F2FS_NODE(page)->i;
        provided = le32_to_cpu(ri->i_inode_checksum);
        calculated = f2fs_inode_chksum(sbi, page);

        if (provided != calculated)
                f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
                          page->index, ino_of_node(page), provided, calculated);

        return provided == calculated;
}

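/* Recompute and store the inode checksum if the feature is enabled. */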
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_inode *ri = &F2FS_NODE(page)->i;

        if (!f2fs_enable_inode_chksum(sbi, page))
                return;

        ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

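/*
 * Sanity check the on-disk inode fields at load time.  Any inconsistency
 * flags the filesystem for fsck and fails the inode read.
 */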
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri = F2FS_INODE(node_page);
        unsigned long long iblocks;

        iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
        if (!iblocks) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
                          __func__, inode->i_ino, iblocks);
                return false;
        }

        if (ino_of_node(node_page) != nid_of_node(node_page)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
                          __func__, inode->i_ino,
                          ino_of_node(node_page), nid_of_node(node_page));
                return false;
        }

        if (f2fs_sb_has_flexible_inline_xattr(sbi)
                        && !f2fs_has_extra_attr(inode)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
                          __func__, inode->i_ino);
                return false;
        }

        if (f2fs_has_extra_attr(inode) &&
                        !f2fs_sb_has_extra_attr(sbi)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
                          __func__, inode->i_ino);
                return false;
        }

        if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
                        fi->i_extra_isize % sizeof(__le32)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
                          __func__, inode->i_ino, fi->i_extra_isize,
                          F2FS_TOTAL_EXTRA_ATTR_SIZE);
                return false;
        }

        if (f2fs_has_extra_attr(inode) &&
                f2fs_sb_has_flexible_inline_xattr(sbi) &&
                f2fs_has_inline_xattr(inode) &&
                (!fi->i_inline_xattr_size ||
                fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
                          __func__, inode->i_ino, fi->i_inline_xattr_size,
                          MAX_INLINE_XATTR_SIZE);
                return false;
        }

        if (F2FS_I(inode)->extent_tree) {
                struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

                if (ei->len &&
                        (!f2fs_is_valid_blkaddr(sbi, ei->blk,
                                                DATA_GENERIC_ENHANCE) ||
                        !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
                                                DATA_GENERIC_ENHANCE))) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
                                  __func__, inode->i_ino,
                                  ei->blk, ei->fofs, ei->len);
                        return false;
                }
        }

        if (f2fs_has_inline_data(inode) &&
                        (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
                          __func__, inode->i_ino, inode->i_mode);
                return false;
        }

        if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
                          __func__, inode->i_ino, inode->i_mode);
                return false;
        }

        if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
                          __func__, inode->i_ino);
                return false;
        }

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
                        fi->i_flags & F2FS_COMPR_FL &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
                                                i_log_cluster_size)) {
                if (ri->i_compress_algorithm >= COMPRESS_MAX) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
                                "compress algorithm: %u, run fsck to fix",
                                  __func__, inode->i_ino,
                                  ri->i_compress_algorithm);
                        return false;
                }
                if (le64_to_cpu(ri->i_compr_blocks) >
                                SECTOR_TO_BLOCK(inode->i_blocks)) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
                                "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
                                  __func__, inode->i_ino,
                                  le64_to_cpu(ri->i_compr_blocks),
                                  SECTOR_TO_BLOCK(inode->i_blocks));
                        return false;
                }
                if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
                        ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
                                "log cluster size: %u, run fsck to fix",
                                  __func__, inode->i_ino,
                                  ri->i_log_cluster_size);
                        return false;
                }
        }

        return true;
}

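/* Fill the in-core inode from its on-disk f2fs_inode block. */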
static int do_read_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct page *node_page;
        struct f2fs_inode *ri;
        projid_t i_projid;
        int err;

        /* Check if ino is within scope */
        if (f2fs_check_nid_range(sbi, inode->i_ino))
                return -EINVAL;

        node_page = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        ri = F2FS_INODE(node_page);

        inode->i_mode = le16_to_cpu(ri->i_mode);
        i_uid_write(inode, le32_to_cpu(ri->i_uid));
        i_gid_write(inode, le32_to_cpu(ri->i_gid));
        set_nlink(inode, le32_to_cpu(ri->i_links));
        inode->i_size = le64_to_cpu(ri->i_size);
        inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

        inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
        inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);
        if (S_ISDIR(inode->i_mode))
                fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        else if (S_ISREG(inode->i_mode))
                fi->i_gc_failures[GC_FAILURE_PIN] =
                                        le16_to_cpu(ri->i_gc_failures);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
        fi->i_flags = le32_to_cpu(ri->i_flags);
        if (S_ISREG(inode->i_mode))
                fi->i_flags &= ~F2FS_PROJINHERIT_FL;
        bitmap_zero(fi->flags, FI_MAX);
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
        fi->i_dir_level = ri->i_dir_level;

        f2fs_init_extent_tree(inode, node_page);

        get_inline_info(inode, ri);

        fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
                                        le16_to_cpu(ri->i_extra_isize) : 0;

        if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
                fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
        } else if (f2fs_has_inline_xattr(inode) ||
                                f2fs_has_inline_dentry(inode)) {
                fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
        } else {

                /*
                 * Previous inline data or directory always reserved 200 bytes
                 * in inode layout, even if inline_xattr is disabled. In order
                 * to keep inline_dentry's structure for backward compatibility,
                 * we get the space back only from inline_data.
                 */
                fi->i_inline_xattr_size = 0;
        }

        if (!sanity_check_inode(inode, node_page)) {
                f2fs_put_page(node_page, 1);
                return -EFSCORRUPTED;
        }

        /* check whether inline data exists */
        if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
                __recover_inline_status(inode, node_page);

        /* try to recover cold bit for non-dir inode */
        if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
                f2fs_wait_on_page_writeback(node_page, NODE, true, true);
                set_cold_node(node_page, false);
                set_page_dirty(node_page);
        }

        /* get rdev by using inline_info */
        __get_inode_rdev(inode, ri);

        if (S_ISREG(inode->i_mode)) {
                err = __written_first_block(sbi, ri);
                if (err < 0) {
                        f2fs_put_page(node_page, 1);
                        return err;
                }
                if (!err)
                        set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
        }

        if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
                fi->last_disk_size = inode->i_size;

        if (fi->i_flags & F2FS_PROJINHERIT_FL)
                set_inode_flag(inode, FI_PROJ_INHERIT);

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
                i_projid = (projid_t)le32_to_cpu(ri->i_projid);
        else
                i_projid = F2FS_DEF_PROJID;
        fi->i_projid = make_kprojid(&init_user_ns, i_projid);

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
                fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
        }

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
                                        (fi->i_flags & F2FS_COMPR_FL)) {
                if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
                                        i_log_cluster_size)) {
                        atomic_set(&fi->i_compr_blocks,
                                        le64_to_cpu(ri->i_compr_blocks));
                        fi->i_compress_algorithm = ri->i_compress_algorithm;
                        fi->i_log_cluster_size = ri->i_log_cluster_size;
                        fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
                        fi->i_cluster_size = 1 << fi->i_log_cluster_size;
                        set_inode_flag(inode, FI_COMPRESSED_FILE);
                }
        }

        F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
        F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
        F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
        F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
        f2fs_put_page(node_page, 1);

        stat_inc_inline_xattr(inode);
        stat_inc_inline_inode(inode);
        stat_inc_inline_dir(inode);
        stat_inc_compr_inode(inode);
        stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

        return 0;
}

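/*
 * Look up the inode for @ino, reading it from disk if not cached, and
 * wire up the operations matching its type.
 */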
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        int ret = 0;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        if (!(inode->i_state & I_NEW)) {
                trace_f2fs_iget(inode);
                return inode;
        }
        if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
                goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (ino == F2FS_COMPRESS_INO(sbi))
                goto make_now;
#endif

        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        } else if (ino == F2FS_META_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_meta_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        } else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
                inode->i_mapping->a_ops = &f2fs_compress_aops;
#endif
                mapping_set_gfp_mask(inode->i_mapping,
                        GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
        } else if (S_ISREG(inode->i_mode)) {
                inode->i_op = &f2fs_file_inode_operations;
                inode->i_fop = &f2fs_file_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &f2fs_dir_inode_operations;
                inode->i_fop = &f2fs_dir_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
                inode_nohighmem(inode);
        } else if (S_ISLNK(inode->i_mode)) {
                if (file_is_encrypt(inode))
                        inode->i_op = &f2fs_encrypted_symlink_inode_operations;
                else
                        inode->i_op = &f2fs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                inode->i_op = &f2fs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
        } else {
                ret = -EIO;
                goto bad_inode;
        }
        f2fs_set_inode_flags(inode);
        unlock_new_inode(inode);
        trace_f2fs_iget(inode);
        return inode;

bad_inode:
        f2fs_inode_synced(inode);
        iget_failed(inode);
        trace_f2fs_iget_exit(inode, ret);
        return ERR_PTR(ret);
}

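/* Same as f2fs_iget(), but retry indefinitely on -ENOMEM. */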
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;
retry:
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode)) {
                if (PTR_ERR(inode) == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
                        goto retry;
                }
        }
        return inode;
}

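/* Write the in-core inode back into its on-disk inode page. */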
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_inode *ri;
        struct extent_tree *et = F2FS_I(inode)->extent_tree;

        f2fs_wait_on_page_writeback(node_page, NODE, true, true);
        set_page_dirty(node_page);

        f2fs_inode_synced(inode);

        ri = F2FS_INODE(node_page);

        ri->i_mode = cpu_to_le16(inode->i_mode);
        ri->i_advise = F2FS_I(inode)->i_advise;
        ri->i_uid = cpu_to_le32(i_uid_read(inode));
        ri->i_gid = cpu_to_le32(i_gid_read(inode));
        ri->i_links = cpu_to_le32(inode->i_nlink);
        ri->i_size = cpu_to_le64(i_size_read(inode));
        ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

        if (et) {
                read_lock(&et->lock);
                set_raw_extent(&et->largest, &ri->i_ext);
                read_unlock(&et->lock);
        } else {
                memset(&ri->i_ext, 0, sizeof(ri->i_ext));
        }
        set_raw_inline(inode, ri);

        ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        if (S_ISDIR(inode->i_mode))
                ri->i_current_depth =
                        cpu_to_le32(F2FS_I(inode)->i_current_depth);
        else if (S_ISREG(inode->i_mode))
                ri->i_gc_failures =
                        cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
        ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
        ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
        ri->i_dir_level = F2FS_I(inode)->i_dir_level;

        if (f2fs_has_extra_attr(inode)) {
                ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

                if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
                        ri->i_inline_xattr_size =
                                cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

                if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
                                                                i_projid)) {
                        projid_t i_projid;

                        i_projid = from_kprojid(&init_user_ns,
                                                F2FS_I(inode)->i_projid);
                        ri->i_projid = cpu_to_le32(i_projid);
                }

                if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
                                                                i_crtime)) {
                        ri->i_crtime =
                                cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
                        ri->i_crtime_nsec =
                                cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
                }

                if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
                                                        i_log_cluster_size)) {
                        ri->i_compr_blocks =
                                cpu_to_le64(atomic_read(
                                        &F2FS_I(inode)->i_compr_blocks));
                        ri->i_compress_algorithm =
                                F2FS_I(inode)->i_compress_algorithm;
                        ri->i_compress_flag =
                                cpu_to_le16(F2FS_I(inode)->i_compress_flag);
                        ri->i_log_cluster_size =
                                F2FS_I(inode)->i_log_cluster_size;
                }
        }

        __set_inode_rdev(inode, ri);

        /* deleted inode */
        if (inode->i_nlink == 0)
                clear_page_private_inline(node_page);

        F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
        F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
        F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
        F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
        f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

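/*
 * Get the inode's node page and update it in place, retrying on -ENOMEM
 * and stopping checkpoint on any other unexpected failure.
 */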
void f2fs_update_inode_page(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *node_page;
retry:
        node_page = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page)) {
                int err = PTR_ERR(node_page);

                if (err == -ENOMEM) {
                        cond_resched();
                        goto retry;
                } else if (err != -ENOENT) {
                        f2fs_stop_checkpoint(sbi, false);
                }
                return;
        }
        f2fs_update_inode(inode, node_page);
        f2fs_put_page(node_page, 1);
}

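/* ->write_inode callback: flush the in-core inode to its node page. */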
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;

        /*
         * atime could be updated without dirtying the f2fs inode in
         * lazytime mode
         */
        if (f2fs_is_time_consistent(inode) &&
                !is_inode_flag_set(inode, FI_DIRTY_INODE))
                return 0;

        if (!f2fs_is_checkpoint_ready(sbi))
                return -ENOSPC;

        /*
         * We need to balance fs here to prevent producing dirty node pages
         * during the urgent cleaning time when running out of free sections.
         */
        f2fs_update_inode_page(inode);
        if (wbc && wbc->nr_to_write)
                f2fs_balance_fs(sbi, true);
        return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t xnid = F2FS_I(inode)->i_xattr_nid;
        int err = 0;

        /* any remaining atomic pages should be discarded */
        if (f2fs_is_atomic_file(inode))
                f2fs_drop_inmem_pages(inode);

        trace_f2fs_evict_inode(inode);
        truncate_inode_pages_final(&inode->i_data);

        if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
                f2fs_invalidate_compress_pages(sbi, inode->i_ino);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi) ||
                        inode->i_ino == F2FS_COMPRESS_INO(sbi))
                goto out_clear;

        f2fs_bug_on(sbi, get_dirty_pages(inode));
        f2fs_remove_dirty_inode(inode);

        f2fs_destroy_extent_tree(inode);

        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

        err = dquot_initialize(inode);
        if (err) {
                err = 0;
                set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
        }

        f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
        f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
        f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

        sb_start_intwrite(inode->i_sb);
        set_inode_flag(inode, FI_NO_ALLOC);
        i_size_write(inode, 0);
retry:
        if (F2FS_HAS_BLOCKS(inode))
                err = f2fs_truncate(inode);

        if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
                f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
                err = -EIO;
        }

        if (!err) {
                f2fs_lock_op(sbi);
                err = f2fs_remove_inode_page(inode);
                f2fs_unlock_op(sbi);
                if (err == -ENOENT)
                        err = 0;
        }

        /* give more chances, if ENOMEM case */
        if (err == -ENOMEM) {
                err = 0;
                goto retry;
        }

        if (err) {
                f2fs_update_inode_page(inode);
                if (dquot_initialize_needed(inode))
                        set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
        }
        sb_end_intwrite(inode->i_sb);
no_delete:
        dquot_drop(inode);

        stat_dec_inline_xattr(inode);
        stat_dec_inline_dir(inode);
        stat_dec_inline_inode(inode);
        stat_dec_compr_inode(inode);
        stat_sub_compr_blocks(inode,
                        atomic_read(&F2FS_I(inode)->i_compr_blocks));

        if (likely(!f2fs_cp_error(sbi) &&
                                !is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
                f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
        else
                f2fs_inode_synced(inode);

        /* if f2fs_new_inode() failed, i_ino is zero; skip invalidation */
        if (inode->i_ino)
                invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
                                                        inode->i_ino);
        if (xnid)
                invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
        if (inode->i_nlink) {
                if (is_inode_flag_set(inode, FI_APPEND_WRITE))
                        f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
                if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
                        f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
        }
        if (is_inode_flag_set(inode, FI_FREE_NID)) {
                f2fs_alloc_nid_failed(sbi, inode->i_ino);
                clear_inode_flag(inode, FI_FREE_NID);
        } else {
                /*
                 * If the xattr nid is corrupted, we can reach an error
                 * condition, err && !f2fs_exist_written_data(sbi,
                 * inode->i_ino, ORPHAN_INO). In that case,
                 * f2fs_check_nid_range() is enough to give a clue.
                 */
        }
out_clear:
        fscrypt_put_encryption_info(inode);
        fsverity_cleanup_inode(inode);
        clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct node_info ni;
        int err;

        /*
         * clear nlink of the inode in order to release its resources
         * immediately.
         */
        clear_nlink(inode);

        /*
         * we must call this to avoid the inode remaining dirty, which would
         * cause a panic when flushing dirty inodes in gdirty_list.
         */
        f2fs_update_inode_page(inode);
        f2fs_inode_synced(inode);

        /* don't make this a bad inode, since it becomes a regular file. */
        unlock_new_inode(inode);

        /*
         * Note: we should add the inode to the orphan list before
         * f2fs_unlock_op(), so we don't lose this orphan if a checkpoint
         * is followed by a sudden power-off.
         */
        err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
        if (err) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
                goto out;
        }

        if (ni.blk_addr != NULL_ADDR) {
                err = f2fs_acquire_orphan_inode(sbi);
                if (err) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
                } else {
                        f2fs_add_orphan_inode(inode);
                }
                f2fs_alloc_nid_done(sbi, inode->i_ino);
        } else {
                set_inode_flag(inode, FI_FREE_NID);
        }

out:
        f2fs_unlock_op(sbi);

        /* iput will drop the inode object */
        iput(inode);
}