linux/fs/f2fs/f2fs.h
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC                0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
#define F2FS_MOUNT_DISCARD              0x00000004
#define F2FS_MOUNT_NOHEAP               0x00000008
#define F2FS_MOUNT_XATTR_USER           0x00000010
#define F2FS_MOUNT_POSIX_ACL            0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040

#define clear_opt(sbi, option)  (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)    (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)   (sbi->mount_opt.opt & F2FS_MOUNT_##option)
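
/*
 * Usage sketch (illustrative only; "sbi" stands for a caller's
 * struct f2fs_sb_info pointer): the option name is pasted onto the
 * F2FS_MOUNT_ prefix, so callers pass just the suffix, e.g.
 *
 *      set_opt(sbi, DISCARD);
 *      if (test_opt(sbi, DISCARD))
 *              clear_opt(sbi, DISCARD);
 */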

#define ver_after(a, b) (typecheck(unsigned long long, a) &&            \
                typecheck(unsigned long long, b) &&                     \
                ((long long)((a) - (b)) > 0))
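
/*
 * Note (illustrative): the signed subtraction makes the comparison safe
 * across wraparound of the 64-bit version counter; for example,
 * ver_after(0, ULLONG_MAX) evaluates to true because
 * (long long)(0 - ULLONG_MAX) == 1.
 */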

typedef u32 block_t;    /*
                         * should not be changed from u32, since it is the
                         * on-disk block address format, __le32.
                         */
typedef u32 nid_t;

struct f2fs_mount_info {
        unsigned int    opt;
};

#define CRCPOLY_LE 0xedb88320

static inline __u32 f2fs_crc32(void *buf, size_t len)
{
        unsigned char *p = (unsigned char *)buf;
        __u32 crc = F2FS_SUPER_MAGIC;
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
        }
        return crc;
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
{
        return f2fs_crc32(buf, buf_size) == blk_crc;
}
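
/*
 * Usage sketch (illustrative only; "cp" and "crc_offset" are hypothetical
 * locals): the checksum is computed over a metadata block up to the stored
 * CRC and then compared against it.
 *
 *      __u32 crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
 *      if (!f2fs_crc_valid(crc, cp, crc_offset))
 *              return -EINVAL;
 */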

/*
 * For checkpoint manager
 */
enum {
        NAT_BITMAP,
        SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
        struct list_head list;  /* list head */
        nid_t ino;              /* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
        struct list_head list;  /* list head */
        struct inode *inode;    /* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
        struct list_head list;  /* list head */
        struct inode *inode;    /* vfs inode pointer */
        block_t blkaddr;        /* block address locating the last inode */
};

#define nats_in_cursum(sum)             (le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)             (le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)          (sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)          (sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)          (sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)        (sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
        int before = nats_in_cursum(rs);
        rs->n_nats = cpu_to_le16(before + i);
        return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
        int before = sits_in_cursum(rs);
        rs->n_sits = cpu_to_le16(before + i);
        return before;
}

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS               FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS               FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS             FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS             FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET       (-1)    /*
                                         * xattrs are stored in one node block
                                         * per file, keeping -1 as its node
                                         * offset to distinguish it from index
                                         * node blocks.
                                         */
enum {
        ALLOC_NODE,                     /* allocate a new node page if needed */
        LOOKUP_NODE,                    /* look up a node without readahead */
        LOOKUP_NODE_RA,                 /*
                                         * look up a node with readahead;
                                         * called by get_datablock_ro.
                                         */
};

#define F2FS_LINK_MAX           32000   /* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
        rwlock_t ext_lock;      /* rwlock for consistency */
        unsigned int fofs;      /* start offset in a file */
        u32 blk_addr;           /* start block address of the extent */
        unsigned int len;       /* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT        0x01
#define FADVISE_LOST_PINO_BIT   0x02

struct f2fs_inode_info {
        struct inode vfs_inode;         /* serve a vfs inode */
        unsigned long i_flags;          /* keep inode flags for ioctl */
        unsigned char i_advise;         /* used to give file attribute hints */
        unsigned int i_current_depth;   /* used only in directory structure */
        unsigned int i_pino;            /* parent inode number */
        umode_t i_acl_mode;             /* keep file acl mode temporarily */

        /* Used below internally in f2fs */
        unsigned long flags;            /* used to pass per-file flags */
        atomic_t dirty_dents;           /* # of dirty dentry pages */
        f2fs_hash_t chash;              /* hash value of given file name */
        unsigned int clevel;            /* maximum level of given file name */
        nid_t i_xattr_nid;              /* node id that contains xattrs */
        struct extent_info ext;         /* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
                                        struct f2fs_extent i_ext)
{
        write_lock(&ext->ext_lock);
        ext->fofs = le32_to_cpu(i_ext.fofs);
        ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
        ext->len = le32_to_cpu(i_ext.len);
        write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
                                        struct f2fs_extent *i_ext)
{
        read_lock(&ext->ext_lock);
        i_ext->fofs = cpu_to_le32(ext->fofs);
        i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
        i_ext->len = cpu_to_le32(ext->len);
        read_unlock(&ext->ext_lock);
}

struct f2fs_nm_info {
        block_t nat_blkaddr;            /* base disk address of NAT */
        nid_t max_nid;                  /* maximum possible node ids */
        nid_t next_scan_nid;            /* the next nid to be scanned */

        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
        rwlock_t nat_tree_lock;         /* protect the nat entry cache */
        unsigned int nat_cnt;           /* the # of cached nat entries */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
        struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

        /* free node ids management */
        struct list_head free_nid_list; /* a list for free nids */
        spinlock_t free_nid_list_lock;  /* protect free nid list */
        unsigned int fcnt;              /* the number of free node ids */
        struct mutex build_lock;        /* lock for build free nids */

        /* for checkpoint */
        char *nat_bitmap;               /* NAT bitmap pointer */
        int bitmap_size;                /* bitmap size */
};

/*
 * This structure is used as one of the function parameters.
 * All the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
        struct inode *inode;            /* vfs inode pointer */
        struct page *inode_page;        /* its inode page, NULL is possible */
        struct page *node_page;         /* cached direct node page */
        nid_t nid;                      /* node id of the direct node block */
        unsigned int ofs_in_node;       /* data offset in the node page */
        bool inode_page_locked;         /* inode page is locked or not */
        block_t data_blkaddr;           /* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
                struct page *ipage, struct page *npage, nid_t nid)
{
        memset(dn, 0, sizeof(*dn));
        dn->inode = inode;
        dn->inode_page = ipage;
        dn->node_page = npage;
        dn->nid = nid;
}
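
/*
 * Usage sketch (illustrative only; error handling abbreviated): a typical
 * caller initializes a dnode_of_data on the stack, resolves it with
 * get_dnode_of_data() (declared below), and releases the pages afterwards.
 *
 *      struct dnode_of_data dn;
 *
 *      set_new_dnode(&dn, inode, NULL, NULL, 0);
 *      err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *      if (err)
 *              return err;
 *      ...use dn.data_blkaddr and dn.ofs_in_node...
 *      f2fs_put_dnode(&dn);
 */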

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split the logs into 3 for data and 3 for node blocks, each set covering
 * hot, warm, and cold types.
 * In the current design, you should not change these numbers intentionally.
 * Instead, the mount option active_logs=x lets you use 2, 4, or 6 logs
 * according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs, consisting of
 * 8 for data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE     (3)
#define NR_CURSEG_NODE_TYPE     (3)
#define NR_CURSEG_TYPE  (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
        CURSEG_HOT_DATA = 0,    /* directory entry blocks */
        CURSEG_WARM_DATA,       /* data blocks */
        CURSEG_COLD_DATA,       /* multimedia or GCed data blocks */
        CURSEG_HOT_NODE,        /* direct node blocks of directory files */
        CURSEG_WARM_NODE,       /* direct node blocks of normal files */
        CURSEG_COLD_NODE,       /* indirect node blocks */
        NO_CHECK_TYPE
};

struct f2fs_sm_info {
        struct sit_info *sit_info;              /* whole segment information */
        struct free_segmap_info *free_info;     /* free segment information */
        struct dirty_seglist_info *dirty_info;  /* dirty segment information */
        struct curseg_info *curseg_array;       /* active segment information */

        struct list_head wblist_head;   /* list of under-writeback pages */
        spinlock_t wblist_lock;         /* lock for checkpoint */

        block_t seg0_blkaddr;           /* block address of 0'th segment */
        block_t main_blkaddr;           /* start block address of main area */
        block_t ssa_blkaddr;            /* start block address of SSA area */

        unsigned int segment_count;     /* total # of segments */
        unsigned int main_segments;     /* # of segments in main area */
        unsigned int reserved_segments; /* # of reserved segments */
        unsigned int ovp_segments;      /* # of overprovision segments */
};

/*
 * For directory operation
 */
#define NODE_DIR1_BLOCK         (ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK         (ADDRS_PER_INODE + 2)
#define NODE_IND1_BLOCK         (ADDRS_PER_INODE + 3)
#define NODE_IND2_BLOCK         (ADDRS_PER_INODE + 4)
#define NODE_DIND_BLOCK         (ADDRS_PER_INODE + 5)

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types, such as pages under
 * writeback, dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
        F2FS_WRITEBACK,
        F2FS_DIRTY_DENTS,
        F2FS_DIRTY_NODES,
        F2FS_DIRTY_META,
        NR_COUNT_TYPE,
};

/*
 * Used as sbi->fs_lock[NR_GLOBAL_LOCKS].
 * The checkpoint procedure takes all the locks in this fs_lock array.
 * Other FS operations grab a free lock; if none is free, they wait to
 * grab one in a round-robin manner.
 */
#define NR_GLOBAL_LOCKS 8

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA                 User data pages. They are written asynchronously.
 * NODE                 Node pages. They are written asynchronously.
 * META                 FS metadata pages such as SIT, NAT, and CP.
 * NR_PAGE_TYPE         The number of page types.
 * META_FLUSH           Make sure the previous pages are written
 *                      while waiting for the bio's completion.
 *                      Can only be used with META.
 */
enum page_type {
        DATA,
        NODE,
        META,
        NR_PAGE_TYPE,
        META_FLUSH,
};

struct f2fs_sb_info {
        struct super_block *sb;                 /* pointer to VFS super block */
        struct buffer_head *raw_super_buf;      /* buffer head of raw sb */
        struct f2fs_super_block *raw_super;     /* raw super block pointer */
        int s_dirty;                            /* dirty flag for checkpoint */

        /* for node-related operations */
        struct f2fs_nm_info *nm_info;           /* node manager */
        struct inode *node_inode;               /* cache node blocks */

        /* for segment-related operations */
        struct f2fs_sm_info *sm_info;           /* segment manager */
        struct bio *bio[NR_PAGE_TYPE];          /* bios to merge */
        sector_t last_block_in_bio[NR_PAGE_TYPE];       /* last block number */
        struct rw_semaphore bio_sem;            /* IO semaphore */

        /* for checkpoint */
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
        struct mutex fs_lock[NR_GLOBAL_LOCKS];  /* blocking FS operations */
        struct mutex node_write;                /* locking node writes */
        struct mutex writepages;                /* mutex for writepages() */
        unsigned char next_lock_num;            /* round-robin global locks */
        int por_doing;                          /* recovery is in progress */
        int on_build_free_nids;                 /* build_free_nids is running */

        /* for orphan inode management */
        struct list_head orphan_inode_list;     /* orphan inode list */
        struct mutex orphan_inode_mutex;        /* for orphan inode list */
        unsigned int n_orphans;                 /* # of orphan inodes */

        /* for directory inode management */
        struct list_head dir_inode_list;        /* dir inode list */
        spinlock_t dir_inode_lock;              /* for dir inode list lock */

        /* basic file system units */
        unsigned int log_sectors_per_block;     /* log2 sectors per block */
        unsigned int log_blocksize;             /* log2 block size */
        unsigned int blocksize;                 /* block size */
        unsigned int root_ino_num;              /* root inode number */
        unsigned int node_ino_num;              /* node inode number */
        unsigned int meta_ino_num;              /* meta inode number */
        unsigned int log_blocks_per_seg;        /* log2 blocks per segment */
        unsigned int blocks_per_seg;            /* blocks per segment */
        unsigned int segs_per_sec;              /* segments per section */
        unsigned int secs_per_zone;             /* sections per zone */
        unsigned int total_sections;            /* total section count */
        unsigned int total_node_count;          /* total node block count */
        unsigned int total_valid_node_count;    /* valid node block count */
        unsigned int total_valid_inode_count;   /* valid inode count */
        int active_logs;                        /* # of active logs */

        block_t user_block_count;               /* # of user blocks */
        block_t total_valid_block_count;        /* # of valid blocks */
        block_t alloc_valid_block_count;        /* # of allocated blocks */
        block_t last_valid_block_count;         /* for recovery */
        u32 s_next_generation;                  /* for NFS support */
        atomic_t nr_pages[NR_COUNT_TYPE];       /* # of pages, see count_type */

        struct f2fs_mount_info mount_opt;       /* mount options */

        /* for cleaning operations */
        struct mutex gc_mutex;                  /* mutex for GC */
        struct f2fs_gc_kthread  *gc_thread;     /* GC thread */
        unsigned int cur_victim_sec;            /* current victim section num */

        /*
         * for stat information.
         * one is for the LFS mode, and the other is for the SSR mode.
         */
#ifdef CONFIG_F2FS_STAT_FS
        struct f2fs_stat_info *stat_info;       /* FS status information */
        unsigned int segment_count[2];          /* # of allocated segments */
        unsigned int block_count[2];            /* # of allocated blocks */
        int total_hit_ext, read_hit_ext;        /* extent cache hit ratio */
        int bg_gc;                              /* background gc calls */
        unsigned int n_dirty_dirs;              /* # of dir inodes */
#endif
        unsigned int last_victim[2];            /* last victim segment # */
        spinlock_t stat_lock;                   /* lock for stat operations */
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
        return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
        return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
        return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
        return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
        sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
        sbi->s_dirty = 0;
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        ckpt_flags |= f;
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        ckpt_flags &= (~f);
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
{
        int i;

        for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
                /*
                 * This is the only time we take multiple fs_lock[]
                 * instances; the order is immaterial since we
                 * always hold cp_mutex, which serializes multiple
                 * such operations.
                 */
                mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
        }
}

static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
{
        int i = 0;
        for (; i < NR_GLOBAL_LOCKS; i++)
                mutex_unlock(&sbi->fs_lock[i]);
}

static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
        unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
        int i = 0;

        for (; i < NR_GLOBAL_LOCKS; i++)
                if (mutex_trylock(&sbi->fs_lock[i]))
                        return i;

        mutex_lock(&sbi->fs_lock[next_lock]);
        sbi->next_lock_num++;
        return next_lock;
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
{
        if (ilock < 0)
                return;
        BUG_ON(ilock >= NR_GLOBAL_LOCKS);
        mutex_unlock(&sbi->fs_lock[ilock]);
}
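
/*
 * Usage sketch (illustrative only): a filesystem operation holds one of the
 * global locks for its duration, while the checkpoint path takes them all
 * via mutex_lock_all() to drain in-flight operations.
 *
 *      int ilock = mutex_lock_op(sbi);
 *      ...modify metadata under the global lock...
 *      mutex_unlock_op(sbi, ilock);
 */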

/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
        WARN_ON((nid >= NM_I(sbi)->max_nid));
        if (nid >= NM_I(sbi)->max_nid)
                return -EINVAL;
        return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS   1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
        if (F2FS_I(inode)->i_xattr_nid)
                return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
        else
                return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
                                 struct inode *inode, blkcnt_t count)
{
        block_t valid_block_count;

        spin_lock(&sbi->stat_lock);
        valid_block_count =
                sbi->total_valid_block_count + (block_t)count;
        if (valid_block_count > sbi->user_block_count) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }
        inode->i_blocks += count;
        sbi->total_valid_block_count = valid_block_count;
        sbi->alloc_valid_block_count += (block_t)count;
        spin_unlock(&sbi->stat_lock);
        return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                blkcnt_t count)
{
        spin_lock(&sbi->stat_lock);
        BUG_ON(sbi->total_valid_block_count < (block_t) count);
        BUG_ON(inode->i_blocks < count);
        inode->i_blocks -= count;
        sbi->total_valid_block_count -= (block_t)count;
        spin_unlock(&sbi->stat_lock);
        return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
        atomic_inc(&sbi->nr_pages[count_type]);
        F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
        atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
        atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
        atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
        return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
        unsigned int pages_per_sec = sbi->segs_per_sec *
                                        (1 << sbi->log_blocks_per_seg);
        return ((get_pages(sbi, block_type) + pages_per_sec - 1)
                        >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}
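
/*
 * Worked example (illustrative, assuming 512 blocks per segment and one
 * segment per section): 513 dirty node pages round up to
 * (513 + 511) >> 9 = 2 segments, i.e. 2 sections.
 */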

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
        block_t ret;
        spin_lock(&sbi->stat_lock);
        ret = sbi->total_valid_block_count;
        spin_unlock(&sbi->stat_lock);
        return ret;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

        /* return NAT or SIT bitmap */
        if (flag == NAT_BITMAP)
                return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
        else if (flag == SIT_BITMAP)
                return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

        return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        int offset = (flag == NAT_BITMAP) ?
                        le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
        return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
        block_t start_addr;
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

        start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

        /*
         * odd numbered checkpoints should be at cp segment 0
         * and even numbered checkpoints must be at cp segment 1
         */
        if (!(ckpt_version & 1))
                start_addr += sbi->blocks_per_seg;

        return start_addr;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
        return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                unsigned int count)
{
        block_t valid_block_count;
        unsigned int valid_node_count;

        spin_lock(&sbi->stat_lock);

        valid_block_count = sbi->total_valid_block_count + (block_t)count;
        sbi->alloc_valid_block_count += (block_t)count;
        valid_node_count = sbi->total_valid_node_count + count;

        if (valid_block_count > sbi->user_block_count) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }

        if (valid_node_count > sbi->total_node_count) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }

        if (inode)
                inode->i_blocks += count;
        sbi->total_valid_node_count = valid_node_count;
        sbi->total_valid_block_count = valid_block_count;
        spin_unlock(&sbi->stat_lock);

        return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                unsigned int count)
{
        spin_lock(&sbi->stat_lock);

        BUG_ON(sbi->total_valid_block_count < count);
        BUG_ON(sbi->total_valid_node_count < count);
        BUG_ON(inode->i_blocks < count);

        inode->i_blocks -= count;
        sbi->total_valid_node_count -= count;
        sbi->total_valid_block_count -= (block_t)count;

        spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
        unsigned int ret;
        spin_lock(&sbi->stat_lock);
        ret = sbi->total_valid_node_count;
        spin_unlock(&sbi->stat_lock);
        return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
        spin_lock(&sbi->stat_lock);
        BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
        sbi->total_valid_inode_count++;
        spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
        spin_lock(&sbi->stat_lock);
        BUG_ON(!sbi->total_valid_inode_count);
        sbi->total_valid_inode_count--;
        spin_unlock(&sbi->stat_lock);
        return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
        unsigned int ret;
        spin_lock(&sbi->stat_lock);
        ret = sbi->total_valid_inode_count;
        spin_unlock(&sbi->stat_lock);
        return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
        if (!page || IS_ERR(page))
                return;

        if (unlock) {
                BUG_ON(!PageLocked(page));
                unlock_page(page);
        }
        page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
        if (dn->node_page)
                f2fs_put_page(dn->node_page, 1);
        if (dn->inode_page && dn->node_page != dn->inode_page)
                f2fs_put_page(dn->inode_page, 0);
        dn->node_page = NULL;
        dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
                                        size_t size, void (*ctor)(void *))
{
        return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
        struct f2fs_node *p = (struct f2fs_node *)page_address(page);
        return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
        return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
                unsigned int offset)
{
        struct f2fs_node *raw_node;
        __le32 *addr_array;
        raw_node = (struct f2fs_node *)page_address(node_page);
        addr_array = blkaddr_in_node(raw_node);
        return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
        int mask;

        addr += (nr >> 3);
        mask = 1 << (7 - (nr & 0x07));
        return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
        int mask;
        int ret;

        addr += (nr >> 3);
        mask = 1 << (7 - (nr & 0x07));
        ret = mask & *addr;
        *addr |= mask;
        return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
        int mask;
        int ret;

        addr += (nr >> 3);
        mask = 1 << (7 - (nr & 0x07));
        ret = mask & *addr;
        *addr &= ~mask;
        return ret;
}
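
/*
 * Note (illustrative): unlike the generic test_bit()/set_bit() helpers,
 * these routines number bits from the most significant bit of each byte,
 * matching the on-disk bitmap layout; e.g. f2fs_set_bit(0, addr) sets
 * bit 0x80 of addr[0], and f2fs_set_bit(9, addr) sets bit 0x40 of addr[1].
 */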

/* used for f2fs_inode_info->flags */
enum {
        FI_NEW_INODE,           /* indicate newly allocated inode */
        FI_DIRTY_INODE,         /* indicate inode is dirty or not */
        FI_INC_LINK,            /* need to increment i_nlink */
        FI_ACL_MODE,            /* indicate acl mode */
        FI_NO_ALLOC,            /* should not allocate any blocks */
        FI_UPDATE_DIR,          /* should update inode block for consistency */
        FI_DELAY_IPUT,          /* used for the recovery */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
        set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
        return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
        clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
        fi->i_acl_mode = mode;
        set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
        if (is_inode_flag_set(fi, flag)) {
                clear_inode_flag(fi, flag);
                return 1;
        }
        return 0;
}

static inline int f2fs_readonly(struct super_block *sb)
{
        return sb->s_flags & MS_RDONLY;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
                                                        struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
                                struct page *, struct inode *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
        return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
                                inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
                struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
                                struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
                                        block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
                                        block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
                                struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
                                struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
                                        int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
        struct list_head stat_list;
        struct f2fs_sb_info *sbi;
        struct mutex stat_lock;
        int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
        int main_area_segs, main_area_sections, main_area_zones;
        int hit_ext, total_ext;
        int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
        int nats, sits, fnids;
        int total_count, utilization;
        int bg_gc;
        unsigned int valid_count, valid_node_count, valid_inode_count;
        unsigned int bimodal, avg_vblocks;
        int util_free, util_valid, util_invalid;
        int rsvd_segs, overp_segs;
        int dirty_count, node_pages, meta_pages;
        int prefree_count, call_count;
        int tot_segs, node_segs, data_segs, free_segs, free_secs;
        int tot_blks, data_blks, node_blks;
        int curseg[NR_CURSEG_TYPE];
        int cursec[NR_CURSEG_TYPE];
        int curzone[NR_CURSEG_TYPE];

        unsigned int segment_count[2];
        unsigned int block_count[2];
        unsigned base_mem, cache_mem;
};

#define stat_inc_call_count(si) ((si)->call_count++)

#define stat_inc_seg_count(sbi, type)                                   \
        do {                                                            \
                struct f2fs_stat_info *si = sbi->stat_info;             \
                (si)->tot_segs++;                                       \
                if (type == SUM_TYPE_DATA)                              \
                        si->data_segs++;                                \
                else                                                    \
                        si->node_segs++;                                \
        } while (0)

#define stat_inc_tot_blk_count(si, blks)                                \
        (si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)                              \
        do {                                                            \
                struct f2fs_stat_info *si = sbi->stat_info;             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->data_blks += (blks);                                \
        } while (0)

#define stat_inc_node_blk_count(sbi, blks)                              \
        do {                                                            \
                struct f2fs_stat_info *si = sbi->stat_info;             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->node_blks += (blks);                                \
        } while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif