linux/fs/f2fs/node.c
   1/*
   2 * fs/f2fs/node.c
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#include <linux/fs.h>
  12#include <linux/f2fs_fs.h>
  13#include <linux/mpage.h>
  14#include <linux/backing-dev.h>
  15#include <linux/blkdev.h>
  16#include <linux/pagevec.h>
  17#include <linux/swap.h>
  18
  19#include "f2fs.h"
  20#include "node.h"
  21#include "segment.h"
  22#include "trace.h"
  23#include <trace/events/f2fs.h>
  24
   25#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
  26
  27static struct kmem_cache *nat_entry_slab;
  28static struct kmem_cache *free_nid_slab;
  29static struct kmem_cache *nat_entry_set_slab;
  30
  31bool available_free_memory(struct f2fs_sb_info *sbi, int type)
  32{
  33        struct f2fs_nm_info *nm_i = NM_I(sbi);
  34        struct sysinfo val;
  35        unsigned long avail_ram;
  36        unsigned long mem_size = 0;
  37        bool res = false;
  38
  39        si_meminfo(&val);
  40
  41        /* only uses low memory */
  42        avail_ram = val.totalram - val.totalhigh;
  43
  44        /*
   45         * give 25%, 25%, 50%, 50%, 50% memory for each component, respectively
  46         */
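        /*
         * Illustrative example of the thresholds below (the numbers are not
         * defaults): with ram_thresh == 10 and 4GB of low memory, the base
         * budget avail_ram * ram_thresh / 100 corresponds to ~400MB, so the
         * ">> 2" caches (FREE_NIDS, NAT_ENTRIES) may grow to ~100MB and the
         * ">> 1" caches to ~200MB before this function returns false.
         */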
  47        if (type == FREE_NIDS) {
  48                mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
  49                                                        PAGE_SHIFT;
  50                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
  51        } else if (type == NAT_ENTRIES) {
  52                mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
  53                                                        PAGE_SHIFT;
  54                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
  55        } else if (type == DIRTY_DENTS) {
  56                if (sbi->sb->s_bdi->wb.dirty_exceeded)
  57                        return false;
  58                mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
  59                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  60        } else if (type == INO_ENTRIES) {
  61                int i;
  62
  63                for (i = 0; i <= UPDATE_INO; i++)
  64                        mem_size += (sbi->im[i].ino_num *
  65                                sizeof(struct ino_entry)) >> PAGE_SHIFT;
  66                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  67        } else if (type == EXTENT_CACHE) {
  68                mem_size = (atomic_read(&sbi->total_ext_tree) *
  69                                sizeof(struct extent_tree) +
  70                                atomic_read(&sbi->total_ext_node) *
  71                                sizeof(struct extent_node)) >> PAGE_SHIFT;
  72                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  73        } else {
  74                if (!sbi->sb->s_bdi->wb.dirty_exceeded)
  75                        return true;
  76        }
  77        return res;
  78}
  79
  80static void clear_node_page_dirty(struct page *page)
  81{
  82        struct address_space *mapping = page->mapping;
   83        unsigned long flags;
  84
  85        if (PageDirty(page)) {
  86                spin_lock_irqsave(&mapping->tree_lock, flags);
  87                radix_tree_tag_clear(&mapping->page_tree,
  88                                page_index(page),
  89                                PAGECACHE_TAG_DIRTY);
  90                spin_unlock_irqrestore(&mapping->tree_lock, flags);
  91
  92                clear_page_dirty_for_io(page);
  93                dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
  94        }
  95        ClearPageUptodate(page);
  96}
  97
  98static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
  99{
 100        pgoff_t index = current_nat_addr(sbi, nid);
 101        return get_meta_page(sbi, index);
 102}
 103
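/*
 * Copy the NAT block that currently covers @nid into its alternate location
 * and mark that copy dirty; set_to_next_nat() flips the nid's bit in the NAT
 * version bitmap so that later lookups use the new copy.
 */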
 104static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 105{
 106        struct page *src_page;
 107        struct page *dst_page;
 108        pgoff_t src_off;
 109        pgoff_t dst_off;
 110        void *src_addr;
 111        void *dst_addr;
 112        struct f2fs_nm_info *nm_i = NM_I(sbi);
 113
 114        src_off = current_nat_addr(sbi, nid);
 115        dst_off = next_nat_addr(sbi, src_off);
 116
 117        /* get current nat block page with lock */
 118        src_page = get_meta_page(sbi, src_off);
 119        dst_page = grab_meta_page(sbi, dst_off);
 120        f2fs_bug_on(sbi, PageDirty(src_page));
 121
 122        src_addr = page_address(src_page);
 123        dst_addr = page_address(dst_page);
 124        memcpy(dst_addr, src_addr, PAGE_SIZE);
 125        set_page_dirty(dst_page);
 126        f2fs_put_page(src_page, 1);
 127
 128        set_to_next_nat(nm_i, nid);
 129
 130        return dst_page;
 131}
 132
 133static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 134{
 135        return radix_tree_lookup(&nm_i->nat_root, n);
 136}
 137
 138static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
 139                nid_t start, unsigned int nr, struct nat_entry **ep)
 140{
 141        return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
 142}
 143
 144static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 145{
 146        list_del(&e->list);
 147        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
 148        nm_i->nat_cnt--;
 149        kmem_cache_free(nat_entry_slab, e);
 150}
 151
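/*
 * Dirty nat entries are grouped into per-NAT-block sets (keyed by
 * NAT_BLOCK_OFFSET(nid)) so that the checkpoint path can flush them back
 * one NAT block at a time.
 */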
 152static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 153                                                struct nat_entry *ne)
 154{
 155        nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
 156        struct nat_entry_set *head;
 157
 158        if (get_nat_flag(ne, IS_DIRTY))
 159                return;
 160
 161        head = radix_tree_lookup(&nm_i->nat_set_root, set);
 162        if (!head) {
 163                head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
 164
 165                INIT_LIST_HEAD(&head->entry_list);
 166                INIT_LIST_HEAD(&head->set_list);
 167                head->set = set;
 168                head->entry_cnt = 0;
 169                f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
 170        }
 171        list_move_tail(&ne->list, &head->entry_list);
 172        nm_i->dirty_nat_cnt++;
 173        head->entry_cnt++;
 174        set_nat_flag(ne, IS_DIRTY, true);
 175}
 176
 177static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 178                                                struct nat_entry *ne)
 179{
 180        nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
 181        struct nat_entry_set *head;
 182
 183        head = radix_tree_lookup(&nm_i->nat_set_root, set);
 184        if (head) {
 185                list_move_tail(&ne->list, &nm_i->nat_entries);
 186                set_nat_flag(ne, IS_DIRTY, false);
 187                head->entry_cnt--;
 188                nm_i->dirty_nat_cnt--;
 189        }
 190}
 191
 192static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
 193                nid_t start, unsigned int nr, struct nat_entry_set **ep)
 194{
 195        return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
 196                                                        start, nr);
 197}
 198
 199int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 200{
 201        struct f2fs_nm_info *nm_i = NM_I(sbi);
 202        struct nat_entry *e;
 203        bool need = false;
 204
 205        down_read(&nm_i->nat_tree_lock);
 206        e = __lookup_nat_cache(nm_i, nid);
 207        if (e) {
 208                if (!get_nat_flag(e, IS_CHECKPOINTED) &&
 209                                !get_nat_flag(e, HAS_FSYNCED_INODE))
 210                        need = true;
 211        }
 212        up_read(&nm_i->nat_tree_lock);
 213        return need;
 214}
 215
 216bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 217{
 218        struct f2fs_nm_info *nm_i = NM_I(sbi);
 219        struct nat_entry *e;
 220        bool is_cp = true;
 221
 222        down_read(&nm_i->nat_tree_lock);
 223        e = __lookup_nat_cache(nm_i, nid);
 224        if (e && !get_nat_flag(e, IS_CHECKPOINTED))
 225                is_cp = false;
 226        up_read(&nm_i->nat_tree_lock);
 227        return is_cp;
 228}
 229
 230bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 231{
 232        struct f2fs_nm_info *nm_i = NM_I(sbi);
 233        struct nat_entry *e;
 234        bool need_update = true;
 235
 236        down_read(&nm_i->nat_tree_lock);
 237        e = __lookup_nat_cache(nm_i, ino);
 238        if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
 239                        (get_nat_flag(e, IS_CHECKPOINTED) ||
 240                         get_nat_flag(e, HAS_FSYNCED_INODE)))
 241                need_update = false;
 242        up_read(&nm_i->nat_tree_lock);
 243        return need_update;
 244}
 245
 246static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 247{
 248        struct nat_entry *new;
 249
 250        new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
 251        f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 252        memset(new, 0, sizeof(struct nat_entry));
 253        nat_set_nid(new, nid);
 254        nat_reset_flag(new);
 255        list_add_tail(&new->list, &nm_i->nat_entries);
 256        nm_i->nat_cnt++;
 257        return new;
 258}
 259
 260static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 261                                                struct f2fs_nat_entry *ne)
 262{
 263        struct f2fs_nm_info *nm_i = NM_I(sbi);
 264        struct nat_entry *e;
 265
 266        e = __lookup_nat_cache(nm_i, nid);
 267        if (!e) {
 268                e = grab_nat_entry(nm_i, nid);
 269                node_info_from_raw_nat(&e->ni, ne);
 270        } else {
 271                f2fs_bug_on(sbi, nat_get_ino(e) != ne->ino ||
 272                                nat_get_blkaddr(e) != ne->block_addr ||
 273                                nat_get_version(e) != ne->version);
 274        }
 275}
 276
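/*
 * Update the cached NAT entry of @ni->nid to point at @new_blkaddr and mark
 * it dirty for the next checkpoint.  A transition to NULL_ADDR means the
 * node was freed: the version number is bumped and next_scan_nid is pulled
 * back so that the nid can be reused.
 */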
 277static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 278                        block_t new_blkaddr, bool fsync_done)
 279{
 280        struct f2fs_nm_info *nm_i = NM_I(sbi);
 281        struct nat_entry *e;
 282
 283        down_write(&nm_i->nat_tree_lock);
 284        e = __lookup_nat_cache(nm_i, ni->nid);
 285        if (!e) {
 286                e = grab_nat_entry(nm_i, ni->nid);
 287                copy_node_info(&e->ni, ni);
 288                f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 289        } else if (new_blkaddr == NEW_ADDR) {
 290                /*
 291                 * when nid is reallocated,
  292                 * the previous nat entry may remain in the nat cache.
  293                 * So, reinitialize it with the new information.
 294                 */
 295                copy_node_info(&e->ni, ni);
 296                f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
 297        }
 298
 299        /* sanity check */
 300        f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
 301        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
 302                        new_blkaddr == NULL_ADDR);
 303        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
 304                        new_blkaddr == NEW_ADDR);
 305        f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
 306                        nat_get_blkaddr(e) != NULL_ADDR &&
 307                        new_blkaddr == NEW_ADDR);
 308
 309        /* increment version no as node is removed */
 310        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 311                unsigned char version = nat_get_version(e);
 312                nat_set_version(e, inc_node_version(version));
 313
 314                /* in order to reuse the nid */
 315                if (nm_i->next_scan_nid > ni->nid)
 316                        nm_i->next_scan_nid = ni->nid;
 317        }
 318
 319        /* change address */
 320        nat_set_blkaddr(e, new_blkaddr);
 321        if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
 322                set_nat_flag(e, IS_CHECKPOINTED, false);
 323        __set_nat_cache_dirty(nm_i, e);
 324
 325        /* update fsync_mark if its inode nat entry is still alive */
 326        if (ni->nid != ni->ino)
 327                e = __lookup_nat_cache(nm_i, ni->ino);
 328        if (e) {
 329                if (fsync_done && ni->nid == ni->ino)
 330                        set_nat_flag(e, HAS_FSYNCED_INODE, true);
 331                set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
 332        }
 333        up_write(&nm_i->nat_tree_lock);
 334}
 335
 336int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 337{
 338        struct f2fs_nm_info *nm_i = NM_I(sbi);
 339        int nr = nr_shrink;
 340
 341        if (!down_write_trylock(&nm_i->nat_tree_lock))
 342                return 0;
 343
 344        while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 345                struct nat_entry *ne;
 346                ne = list_first_entry(&nm_i->nat_entries,
 347                                        struct nat_entry, list);
 348                __del_from_nat_cache(nm_i, ne);
 349                nr_shrink--;
 350        }
 351        up_write(&nm_i->nat_tree_lock);
 352        return nr - nr_shrink;
 353}
 354
 355/*
 356 * This function always returns success
 357 */
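/*
 * Lookup order: the in-memory nat cache, then the NAT journal kept in the
 * hot-data current segment, and finally the on-disk NAT block; anything
 * found outside the cache is added to the cache afterwards.
 */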
 358void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 359{
 360        struct f2fs_nm_info *nm_i = NM_I(sbi);
 361        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 362        struct f2fs_journal *journal = curseg->journal;
 363        nid_t start_nid = START_NID(nid);
 364        struct f2fs_nat_block *nat_blk;
 365        struct page *page = NULL;
 366        struct f2fs_nat_entry ne;
 367        struct nat_entry *e;
 368        int i;
 369
 370        ni->nid = nid;
 371
 372        /* Check nat cache */
 373        down_read(&nm_i->nat_tree_lock);
 374        e = __lookup_nat_cache(nm_i, nid);
 375        if (e) {
 376                ni->ino = nat_get_ino(e);
 377                ni->blk_addr = nat_get_blkaddr(e);
 378                ni->version = nat_get_version(e);
 379                up_read(&nm_i->nat_tree_lock);
 380                return;
 381        }
 382
 383        memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 384
 385        /* Check current segment summary */
 386        down_read(&curseg->journal_rwsem);
 387        i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
 388        if (i >= 0) {
 389                ne = nat_in_journal(journal, i);
 390                node_info_from_raw_nat(ni, &ne);
 391        }
 392        up_read(&curseg->journal_rwsem);
 393        if (i >= 0)
 394                goto cache;
 395
 396        /* Fill node_info from nat page */
 397        page = get_current_nat_page(sbi, start_nid);
 398        nat_blk = (struct f2fs_nat_block *)page_address(page);
 399        ne = nat_blk->entries[nid - start_nid];
 400        node_info_from_raw_nat(ni, &ne);
 401        f2fs_put_page(page, 1);
 402cache:
 403        up_read(&nm_i->nat_tree_lock);
 404        /* cache nat entry */
 405        down_write(&nm_i->nat_tree_lock);
 406        cache_nat_entry(sbi, nid, &ne);
 407        up_write(&nm_i->nat_tree_lock);
 408}
 409
 410/*
  411 * readahead up to n of the node pages referenced by parent, starting at index start.
 412 */
 413static void ra_node_pages(struct page *parent, int start, int n)
 414{
 415        struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
 416        struct blk_plug plug;
 417        int i, end;
 418        nid_t nid;
 419
 420        blk_start_plug(&plug);
 421
  422        /* try readahead for the siblings of the desired node */
 423        end = start + n;
 424        end = min(end, NIDS_PER_BLOCK);
 425        for (i = start; i < end; i++) {
 426                nid = get_nid(parent, i, false);
 427                ra_node_page(sbi, nid);
 428        }
 429
 430        blk_finish_plug(&plug);
 431}
 432
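/*
 * Given a file offset (pgofs) whose lookup stopped at dn->cur_level of a
 * dn->max_level deep node path, return the first file offset served by the
 * next node block at that level, so the caller can skip the whole hole at
 * once.
 */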
 433pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
 434{
 435        const long direct_index = ADDRS_PER_INODE(dn->inode);
 436        const long direct_blks = ADDRS_PER_BLOCK;
 437        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
 438        unsigned int skipped_unit = ADDRS_PER_BLOCK;
 439        int cur_level = dn->cur_level;
 440        int max_level = dn->max_level;
 441        pgoff_t base = 0;
 442
 443        if (!dn->max_level)
 444                return pgofs + 1;
 445
 446        while (max_level-- > cur_level)
 447                skipped_unit *= NIDS_PER_BLOCK;
 448
 449        switch (dn->max_level) {
 450        case 3:
  451                base += 2 * indirect_blks;      /* fall through */
  452        case 2:
  453                base += 2 * direct_blks;        /* fall through */
 454        case 1:
 455                base += direct_index;
 456                break;
 457        default:
 458                f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
 459        }
 460
 461        return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
 462}
 463
 464/*
 465 * The maximum depth is four.
 466 * Offset[0] will have raw inode offset.
 467 */
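/*
 * offset[i] is the child index chosen at depth i: offset[0] is either an
 * index into the inode's direct address array or one of the NODE_*_BLOCK
 * selectors for the five i_nid[] slots.  noffset[i] is the logical number
 * of the node block at depth i within the inode's node tree.  In file-offset
 * order the regions are: direct pointers in the inode, two direct node
 * blocks, two indirect node blocks, then one double-indirect node block.
 */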
 468static int get_node_path(struct inode *inode, long block,
 469                                int offset[4], unsigned int noffset[4])
 470{
 471        const long direct_index = ADDRS_PER_INODE(inode);
 472        const long direct_blks = ADDRS_PER_BLOCK;
 473        const long dptrs_per_blk = NIDS_PER_BLOCK;
 474        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
 475        const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
 476        int n = 0;
 477        int level = 0;
 478
 479        noffset[0] = 0;
 480
 481        if (block < direct_index) {
 482                offset[n] = block;
 483                goto got;
 484        }
 485        block -= direct_index;
 486        if (block < direct_blks) {
 487                offset[n++] = NODE_DIR1_BLOCK;
 488                noffset[n] = 1;
 489                offset[n] = block;
 490                level = 1;
 491                goto got;
 492        }
 493        block -= direct_blks;
 494        if (block < direct_blks) {
 495                offset[n++] = NODE_DIR2_BLOCK;
 496                noffset[n] = 2;
 497                offset[n] = block;
 498                level = 1;
 499                goto got;
 500        }
 501        block -= direct_blks;
 502        if (block < indirect_blks) {
 503                offset[n++] = NODE_IND1_BLOCK;
 504                noffset[n] = 3;
 505                offset[n++] = block / direct_blks;
 506                noffset[n] = 4 + offset[n - 1];
 507                offset[n] = block % direct_blks;
 508                level = 2;
 509                goto got;
 510        }
 511        block -= indirect_blks;
 512        if (block < indirect_blks) {
 513                offset[n++] = NODE_IND2_BLOCK;
 514                noffset[n] = 4 + dptrs_per_blk;
 515                offset[n++] = block / direct_blks;
 516                noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
 517                offset[n] = block % direct_blks;
 518                level = 2;
 519                goto got;
 520        }
 521        block -= indirect_blks;
 522        if (block < dindirect_blks) {
 523                offset[n++] = NODE_DIND_BLOCK;
 524                noffset[n] = 5 + (dptrs_per_blk * 2);
 525                offset[n++] = block / indirect_blks;
 526                noffset[n] = 6 + (dptrs_per_blk * 2) +
 527                              offset[n - 1] * (dptrs_per_blk + 1);
 528                offset[n++] = (block / direct_blks) % dptrs_per_blk;
 529                noffset[n] = 7 + (dptrs_per_blk * 2) +
 530                              offset[n - 2] * (dptrs_per_blk + 1) +
 531                              offset[n - 1];
 532                offset[n] = block % direct_blks;
 533                level = 3;
 534                goto got;
 535        } else {
 536                BUG();
 537        }
 538got:
 539        return level;
 540}
 541
 542/*
 543 * Caller should call f2fs_put_dnode(dn).
 544 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
  545 * f2fs_unlock_op() only if mode is not set to RDONLY_NODE.
  546 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 547 */
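/*
 * With ALLOC_NODE, missing node blocks along the path are allocated on the
 * fly; with LOOKUP_NODE the walk returns -ENOENT at the first hole;
 * LOOKUP_NODE_RA additionally readaheads sibling node pages at the last
 * level.
 */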
 548int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 549{
 550        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 551        struct page *npage[4];
 552        struct page *parent = NULL;
 553        int offset[4];
 554        unsigned int noffset[4];
 555        nid_t nids[4];
 556        int level, i = 0;
 557        int err = 0;
 558
 559        level = get_node_path(dn->inode, index, offset, noffset);
 560
 561        nids[0] = dn->inode->i_ino;
 562        npage[0] = dn->inode_page;
 563
 564        if (!npage[0]) {
 565                npage[0] = get_node_page(sbi, nids[0]);
 566                if (IS_ERR(npage[0]))
 567                        return PTR_ERR(npage[0]);
 568        }
 569
 570        /* if inline_data is set, should not report any block indices */
 571        if (f2fs_has_inline_data(dn->inode) && index) {
 572                err = -ENOENT;
 573                f2fs_put_page(npage[0], 1);
 574                goto release_out;
 575        }
 576
 577        parent = npage[0];
 578        if (level != 0)
 579                nids[1] = get_nid(parent, offset[0], true);
 580        dn->inode_page = npage[0];
 581        dn->inode_page_locked = true;
 582
 583        /* get indirect or direct nodes */
 584        for (i = 1; i <= level; i++) {
 585                bool done = false;
 586
 587                if (!nids[i] && mode == ALLOC_NODE) {
 588                        /* alloc new node */
 589                        if (!alloc_nid(sbi, &(nids[i]))) {
 590                                err = -ENOSPC;
 591                                goto release_pages;
 592                        }
 593
 594                        dn->nid = nids[i];
 595                        npage[i] = new_node_page(dn, noffset[i], NULL);
 596                        if (IS_ERR(npage[i])) {
 597                                alloc_nid_failed(sbi, nids[i]);
 598                                err = PTR_ERR(npage[i]);
 599                                goto release_pages;
 600                        }
 601
 602                        set_nid(parent, offset[i - 1], nids[i], i == 1);
 603                        alloc_nid_done(sbi, nids[i]);
 604                        done = true;
 605                } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
 606                        npage[i] = get_node_page_ra(parent, offset[i - 1]);
 607                        if (IS_ERR(npage[i])) {
 608                                err = PTR_ERR(npage[i]);
 609                                goto release_pages;
 610                        }
 611                        done = true;
 612                }
 613                if (i == 1) {
 614                        dn->inode_page_locked = false;
 615                        unlock_page(parent);
 616                } else {
 617                        f2fs_put_page(parent, 1);
 618                }
 619
 620                if (!done) {
 621                        npage[i] = get_node_page(sbi, nids[i]);
 622                        if (IS_ERR(npage[i])) {
 623                                err = PTR_ERR(npage[i]);
 624                                f2fs_put_page(npage[0], 0);
 625                                goto release_out;
 626                        }
 627                }
 628                if (i < level) {
 629                        parent = npage[i];
 630                        nids[i + 1] = get_nid(parent, offset[i], false);
 631                }
 632        }
 633        dn->nid = nids[level];
 634        dn->ofs_in_node = offset[level];
 635        dn->node_page = npage[level];
 636        dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
 637        return 0;
 638
 639release_pages:
 640        f2fs_put_page(parent, 1);
 641        if (i > 1)
 642                f2fs_put_page(npage[0], 0);
 643release_out:
 644        dn->inode_page = NULL;
 645        dn->node_page = NULL;
 646        if (err == -ENOENT) {
 647                dn->cur_level = i;
 648                dn->max_level = level;
 649        }
 650        return err;
 651}
 652
 653static void truncate_node(struct dnode_of_data *dn)
 654{
 655        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 656        struct node_info ni;
 657
 658        get_node_info(sbi, dn->nid, &ni);
 659        if (dn->inode->i_blocks == 0) {
 660                f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
 661                goto invalidate;
 662        }
 663        f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
 664
 665        /* Deallocate node address */
 666        invalidate_blocks(sbi, ni.blk_addr);
 667        dec_valid_node_count(sbi, dn->inode);
 668        set_node_addr(sbi, &ni, NULL_ADDR, false);
 669
 670        if (dn->nid == dn->inode->i_ino) {
 671                remove_orphan_inode(sbi, dn->nid);
 672                dec_valid_inode_count(sbi);
 673        } else {
 674                sync_inode_page(dn);
 675        }
 676invalidate:
 677        clear_node_page_dirty(dn->node_page);
 678        set_sbi_flag(sbi, SBI_IS_DIRTY);
 679
 680        f2fs_put_page(dn->node_page, 1);
 681
 682        invalidate_mapping_pages(NODE_MAPPING(sbi),
 683                        dn->node_page->index, dn->node_page->index);
 684
 685        dn->node_page = NULL;
 686        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
 687}
 688
 689static int truncate_dnode(struct dnode_of_data *dn)
 690{
 691        struct page *page;
 692
 693        if (dn->nid == 0)
 694                return 1;
 695
 696        /* get direct node */
 697        page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
 698        if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
 699                return 1;
 700        else if (IS_ERR(page))
 701                return PTR_ERR(page);
 702
 703        /* Make dnode_of_data for parameter */
 704        dn->node_page = page;
 705        dn->ofs_in_node = 0;
 706        truncate_data_blocks(dn);
 707        truncate_node(dn);
 708        return 1;
 709}
 710
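/*
 * Recursively truncate the subtree rooted at dn->nid and return the number
 * of node blocks freed.  A return value of NIDS_PER_BLOCK + 1 from a child
 * means that entire indirect node (itself plus all of its children) is gone
 * and its slot in the parent can be cleared.
 */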
 711static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 712                                                int ofs, int depth)
 713{
 714        struct dnode_of_data rdn = *dn;
 715        struct page *page;
 716        struct f2fs_node *rn;
 717        nid_t child_nid;
 718        unsigned int child_nofs;
 719        int freed = 0;
 720        int i, ret;
 721
 722        if (dn->nid == 0)
 723                return NIDS_PER_BLOCK + 1;
 724
 725        trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
 726
 727        page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
 728        if (IS_ERR(page)) {
 729                trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
 730                return PTR_ERR(page);
 731        }
 732
 733        ra_node_pages(page, ofs, NIDS_PER_BLOCK);
 734
 735        rn = F2FS_NODE(page);
 736        if (depth < 3) {
 737                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
 738                        child_nid = le32_to_cpu(rn->in.nid[i]);
 739                        if (child_nid == 0)
 740                                continue;
 741                        rdn.nid = child_nid;
 742                        ret = truncate_dnode(&rdn);
 743                        if (ret < 0)
 744                                goto out_err;
 745                        if (set_nid(page, i, 0, false))
 746                                dn->node_changed = true;
 747                }
 748        } else {
 749                child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
 750                for (i = ofs; i < NIDS_PER_BLOCK; i++) {
 751                        child_nid = le32_to_cpu(rn->in.nid[i]);
 752                        if (child_nid == 0) {
 753                                child_nofs += NIDS_PER_BLOCK + 1;
 754                                continue;
 755                        }
 756                        rdn.nid = child_nid;
 757                        ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
 758                        if (ret == (NIDS_PER_BLOCK + 1)) {
 759                                if (set_nid(page, i, 0, false))
 760                                        dn->node_changed = true;
 761                                child_nofs += ret;
 762                        } else if (ret < 0 && ret != -ENOENT) {
 763                                goto out_err;
 764                        }
 765                }
 766                freed = child_nofs;
 767        }
 768
 769        if (!ofs) {
 770                /* remove current indirect node */
 771                dn->node_page = page;
 772                truncate_node(dn);
 773                freed++;
 774        } else {
 775                f2fs_put_page(page, 1);
 776        }
 777        trace_f2fs_truncate_nodes_exit(dn->inode, freed);
 778        return freed;
 779
 780out_err:
 781        f2fs_put_page(page, 1);
 782        trace_f2fs_truncate_nodes_exit(dn->inode, ret);
 783        return ret;
 784}
 785
 786static int truncate_partial_nodes(struct dnode_of_data *dn,
 787                        struct f2fs_inode *ri, int *offset, int depth)
 788{
 789        struct page *pages[2];
 790        nid_t nid[3];
 791        nid_t child_nid;
 792        int err = 0;
 793        int i;
 794        int idx = depth - 2;
 795
 796        nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
 797        if (!nid[0])
 798                return 0;
 799
 800        /* get indirect nodes in the path */
 801        for (i = 0; i < idx + 1; i++) {
  802                /* reference count will be increased */
 803                pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
 804                if (IS_ERR(pages[i])) {
 805                        err = PTR_ERR(pages[i]);
 806                        idx = i - 1;
 807                        goto fail;
 808                }
 809                nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
 810        }
 811
 812        ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
 813
 814        /* free direct nodes linked to a partial indirect node */
 815        for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
 816                child_nid = get_nid(pages[idx], i, false);
 817                if (!child_nid)
 818                        continue;
 819                dn->nid = child_nid;
 820                err = truncate_dnode(dn);
 821                if (err < 0)
 822                        goto fail;
 823                if (set_nid(pages[idx], i, 0, false))
 824                        dn->node_changed = true;
 825        }
 826
 827        if (offset[idx + 1] == 0) {
 828                dn->node_page = pages[idx];
 829                dn->nid = nid[idx];
 830                truncate_node(dn);
 831        } else {
 832                f2fs_put_page(pages[idx], 1);
 833        }
 834        offset[idx]++;
 835        offset[idx + 1] = 0;
 836        idx--;
 837fail:
 838        for (i = idx; i >= 0; i--)
 839                f2fs_put_page(pages[i], 1);
 840
 841        trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
 842
 843        return err;
 844}
 845
 846/*
 847 * All the block addresses of data and nodes should be nullified.
 848 */
 849int truncate_inode_blocks(struct inode *inode, pgoff_t from)
 850{
 851        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 852        int err = 0, cont = 1;
 853        int level, offset[4], noffset[4];
 854        unsigned int nofs = 0;
 855        struct f2fs_inode *ri;
 856        struct dnode_of_data dn;
 857        struct page *page;
 858
 859        trace_f2fs_truncate_inode_blocks_enter(inode, from);
 860
 861        level = get_node_path(inode, from, offset, noffset);
 862
 863        page = get_node_page(sbi, inode->i_ino);
 864        if (IS_ERR(page)) {
 865                trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
 866                return PTR_ERR(page);
 867        }
 868
 869        set_new_dnode(&dn, inode, page, NULL, 0);
 870        unlock_page(page);
 871
 872        ri = F2FS_INODE(page);
 873        switch (level) {
 874        case 0:
 875        case 1:
 876                nofs = noffset[1];
 877                break;
 878        case 2:
 879                nofs = noffset[1];
 880                if (!offset[level - 1])
 881                        goto skip_partial;
 882                err = truncate_partial_nodes(&dn, ri, offset, level);
 883                if (err < 0 && err != -ENOENT)
 884                        goto fail;
 885                nofs += 1 + NIDS_PER_BLOCK;
 886                break;
 887        case 3:
 888                nofs = 5 + 2 * NIDS_PER_BLOCK;
 889                if (!offset[level - 1])
 890                        goto skip_partial;
 891                err = truncate_partial_nodes(&dn, ri, offset, level);
 892                if (err < 0 && err != -ENOENT)
 893                        goto fail;
 894                break;
 895        default:
 896                BUG();
 897        }
 898
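        /*
         * Each pass of the loop below frees one whole subtree referenced by
         * ri->i_nid[]: the two direct node blocks, the two indirect trees
         * and finally the double-indirect tree, starting from the slot that
         * covers 'from' (any partially truncated subtree was handled above).
         */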
 899skip_partial:
 900        while (cont) {
 901                dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
 902                switch (offset[0]) {
 903                case NODE_DIR1_BLOCK:
 904                case NODE_DIR2_BLOCK:
 905                        err = truncate_dnode(&dn);
 906                        break;
 907
 908                case NODE_IND1_BLOCK:
 909                case NODE_IND2_BLOCK:
 910                        err = truncate_nodes(&dn, nofs, offset[1], 2);
 911                        break;
 912
 913                case NODE_DIND_BLOCK:
 914                        err = truncate_nodes(&dn, nofs, offset[1], 3);
 915                        cont = 0;
 916                        break;
 917
 918                default:
 919                        BUG();
 920                }
 921                if (err < 0 && err != -ENOENT)
 922                        goto fail;
 923                if (offset[1] == 0 &&
 924                                ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
 925                        lock_page(page);
 926                        BUG_ON(page->mapping != NODE_MAPPING(sbi));
 927                        f2fs_wait_on_page_writeback(page, NODE, true);
 928                        ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 929                        set_page_dirty(page);
 930                        unlock_page(page);
 931                }
 932                offset[1] = 0;
 933                offset[0]++;
 934                nofs += err;
 935        }
 936fail:
 937        f2fs_put_page(page, 0);
 938        trace_f2fs_truncate_inode_blocks_exit(inode, err);
 939        return err > 0 ? 0 : err;
 940}
 941
 942int truncate_xattr_node(struct inode *inode, struct page *page)
 943{
 944        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 945        nid_t nid = F2FS_I(inode)->i_xattr_nid;
 946        struct dnode_of_data dn;
 947        struct page *npage;
 948
 949        if (!nid)
 950                return 0;
 951
 952        npage = get_node_page(sbi, nid);
 953        if (IS_ERR(npage))
 954                return PTR_ERR(npage);
 955
 956        F2FS_I(inode)->i_xattr_nid = 0;
 957
 958        /* need to do checkpoint during fsync */
 959        F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
 960
 961        set_new_dnode(&dn, inode, page, npage, nid);
 962
 963        if (page)
 964                dn.inode_page_locked = true;
 965        truncate_node(&dn);
 966        return 0;
 967}
 968
 969/*
 970 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 971 * f2fs_unlock_op().
 972 */
 973int remove_inode_page(struct inode *inode)
 974{
 975        struct dnode_of_data dn;
 976        int err;
 977
 978        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
 979        err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
 980        if (err)
 981                return err;
 982
 983        err = truncate_xattr_node(inode, dn.inode_page);
 984        if (err) {
 985                f2fs_put_dnode(&dn);
 986                return err;
 987        }
 988
 989        /* remove potential inline_data blocks */
 990        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 991                                S_ISLNK(inode->i_mode))
 992                truncate_data_blocks_range(&dn, 1);
 993
 994        /* 0 is possible, after f2fs_new_inode() has failed */
 995        f2fs_bug_on(F2FS_I_SB(inode),
 996                        inode->i_blocks != 0 && inode->i_blocks != 1);
 997
 998        /* will put inode & node pages */
 999        truncate_node(&dn);
1000        return 0;
1001}
1002
1003struct page *new_inode_page(struct inode *inode)
1004{
1005        struct dnode_of_data dn;
1006
1007        /* allocate inode page for new inode */
1008        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1009
1010        /* caller should f2fs_put_page(page, 1); */
1011        return new_node_page(&dn, 0, NULL);
1012}
1013
1014struct page *new_node_page(struct dnode_of_data *dn,
1015                                unsigned int ofs, struct page *ipage)
1016{
1017        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1018        struct node_info old_ni, new_ni;
1019        struct page *page;
1020        int err;
1021
1022        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
1023                return ERR_PTR(-EPERM);
1024
1025        page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1026        if (!page)
1027                return ERR_PTR(-ENOMEM);
1028
1029        if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
1030                err = -ENOSPC;
1031                goto fail;
1032        }
1033
1034        get_node_info(sbi, dn->nid, &old_ni);
1035
1036        /* Reinitialize old_ni with new node page */
1037        f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
1038        new_ni = old_ni;
1039        new_ni.ino = dn->inode->i_ino;
1040        set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1041
1042        f2fs_wait_on_page_writeback(page, NODE, true);
1043        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1044        set_cold_node(dn->inode, page);
1045        SetPageUptodate(page);
1046        if (set_page_dirty(page))
1047                dn->node_changed = true;
1048
1049        if (f2fs_has_xattr_block(ofs))
1050                F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
1051
1052        dn->node_page = page;
1053        if (ipage)
1054                update_inode(dn->inode, ipage);
1055        else
1056                sync_inode_page(dn);
1057        if (ofs == 0)
1058                inc_valid_inode_count(sbi);
1059
1060        return page;
1061
1062fail:
1063        clear_node_page_dirty(page);
1064        f2fs_put_page(page, 1);
1065        return ERR_PTR(err);
1066}
1067
1068/*
 1069 * Caller should act as follows depending on the return value:
1070 * 0: f2fs_put_page(page, 0)
1071 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1072 */
1073static int read_node_page(struct page *page, int rw)
1074{
1075        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1076        struct node_info ni;
1077        struct f2fs_io_info fio = {
1078                .sbi = sbi,
1079                .type = NODE,
1080                .rw = rw,
1081                .page = page,
1082                .encrypted_page = NULL,
1083        };
1084
1085        get_node_info(sbi, page->index, &ni);
1086
1087        if (unlikely(ni.blk_addr == NULL_ADDR)) {
1088                ClearPageUptodate(page);
1089                return -ENOENT;
1090        }
1091
1092        if (PageUptodate(page))
1093                return LOCKED_PAGE;
1094
1095        fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1096        return f2fs_submit_page_bio(&fio);
1097}
1098
1099/*
1100 * Readahead a node page
1101 */
1102void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1103{
1104        struct page *apage;
1105        int err;
1106
1107        if (!nid)
1108                return;
1109        f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1110
1111        rcu_read_lock();
1112        apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
1113        rcu_read_unlock();
1114        if (apage)
1115                return;
1116
1117        apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1118        if (!apage)
1119                return;
1120
1121        err = read_node_page(apage, READA);
1122        f2fs_put_page(apage, err ? 1 : 0);
1123}
1124
1125static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1126                                        struct page *parent, int start)
1127{
1128        struct page *page;
1129        int err;
1130
1131        if (!nid)
1132                return ERR_PTR(-ENOENT);
1133        f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1134repeat:
1135        page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1136        if (!page)
1137                return ERR_PTR(-ENOMEM);
1138
1139        err = read_node_page(page, READ_SYNC);
1140        if (err < 0) {
1141                f2fs_put_page(page, 1);
1142                return ERR_PTR(err);
1143        } else if (err == LOCKED_PAGE) {
1144                goto page_hit;
1145        }
1146
1147        if (parent)
1148                ra_node_pages(parent, start + 1, MAX_RA_NODE);
1149
1150        lock_page(page);
1151
1152        if (unlikely(!PageUptodate(page))) {
1153                f2fs_put_page(page, 1);
1154                return ERR_PTR(-EIO);
1155        }
1156        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1157                f2fs_put_page(page, 1);
1158                goto repeat;
1159        }
1160page_hit:
1161        f2fs_bug_on(sbi, nid != nid_of_node(page));
1162        return page;
1163}
1164
1165struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1166{
1167        return __get_node_page(sbi, nid, NULL, 0);
1168}
1169
1170struct page *get_node_page_ra(struct page *parent, int start)
1171{
1172        struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1173        nid_t nid = get_nid(parent, start, false);
1174
1175        return __get_node_page(sbi, nid, parent, start);
1176}
1177
1178void sync_inode_page(struct dnode_of_data *dn)
1179{
1180        int ret = 0;
1181
1182        if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
1183                ret = update_inode(dn->inode, dn->node_page);
1184        } else if (dn->inode_page) {
1185                if (!dn->inode_page_locked)
1186                        lock_page(dn->inode_page);
1187                ret = update_inode(dn->inode, dn->inode_page);
1188                if (!dn->inode_page_locked)
1189                        unlock_page(dn->inode_page);
1190        } else {
1191                ret = update_inode_page(dn->inode);
1192        }
 1193        dn->node_changed = ret ? true : false;
1194}
1195
1196static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1197{
1198        struct inode *inode;
1199        struct page *page;
1200        int ret;
1201
1202        /* should flush inline_data before evict_inode */
1203        inode = ilookup(sbi->sb, ino);
1204        if (!inode)
1205                return;
1206
1207        page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
1208        if (!page)
1209                goto iput_out;
1210
1211        if (!PageUptodate(page))
1212                goto page_out;
1213
1214        if (!PageDirty(page))
1215                goto page_out;
1216
1217        if (!clear_page_dirty_for_io(page))
1218                goto page_out;
1219
1220        ret = f2fs_write_inline_data(inode, page);
1221        inode_dec_dirty_pages(inode);
1222        if (ret)
1223                set_page_dirty(page);
1224page_out:
1225        f2fs_put_page(page, 1);
1226iput_out:
1227        iput(inode);
1228}
1229
1230void move_node_page(struct page *node_page, int gc_type)
1231{
1232        if (gc_type == FG_GC) {
1233                struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
1234                struct writeback_control wbc = {
1235                        .sync_mode = WB_SYNC_ALL,
1236                        .nr_to_write = 1,
1237                        .for_reclaim = 0,
1238                };
1239
1240                set_page_dirty(node_page);
1241                f2fs_wait_on_page_writeback(node_page, NODE, true);
1242
1243                f2fs_bug_on(sbi, PageWriteback(node_page));
1244                if (!clear_page_dirty_for_io(node_page))
1245                        goto out_page;
1246
1247                if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
1248                        unlock_page(node_page);
1249                goto release_page;
1250        } else {
1251                /* set page dirty and write it */
1252                if (!PageWriteback(node_page))
1253                        set_page_dirty(node_page);
1254        }
1255out_page:
1256        unlock_page(node_page);
1257release_page:
1258        f2fs_put_page(node_page, 0);
1259}
1260
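/*
 * Find, and take an extra reference on, the last dirty dnode page belonging
 * to @ino; in the atomic fsync path fsync_node_pages() writes the fsync mark
 * only on this final page.
 */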
1261static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1262{
1263        pgoff_t index, end;
1264        struct pagevec pvec;
1265        struct page *last_page = NULL;
1266
1267        pagevec_init(&pvec, 0);
1268        index = 0;
1269        end = ULONG_MAX;
1270
1271        while (index <= end) {
1272                int i, nr_pages;
1273                nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1274                                PAGECACHE_TAG_DIRTY,
1275                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1276                if (nr_pages == 0)
1277                        break;
1278
1279                for (i = 0; i < nr_pages; i++) {
1280                        struct page *page = pvec.pages[i];
1281
1282                        if (unlikely(f2fs_cp_error(sbi))) {
1283                                f2fs_put_page(last_page, 0);
1284                                pagevec_release(&pvec);
1285                                return ERR_PTR(-EIO);
1286                        }
1287
1288                        if (!IS_DNODE(page) || !is_cold_node(page))
1289                                continue;
1290                        if (ino_of_node(page) != ino)
1291                                continue;
1292
1293                        lock_page(page);
1294
1295                        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1296continue_unlock:
1297                                unlock_page(page);
1298                                continue;
1299                        }
1300                        if (ino_of_node(page) != ino)
1301                                goto continue_unlock;
1302
1303                        if (!PageDirty(page)) {
1304                                /* someone wrote it for us */
1305                                goto continue_unlock;
1306                        }
1307
1308                        if (last_page)
1309                                f2fs_put_page(last_page, 0);
1310
1311                        get_page(page);
1312                        last_page = page;
1313                        unlock_page(page);
1314                }
1315                pagevec_release(&pvec);
1316                cond_resched();
1317        }
1318        return last_page;
1319}
1320
1321int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1322                        struct writeback_control *wbc, bool atomic)
1323{
1324        pgoff_t index, end;
1325        struct pagevec pvec;
1326        int ret = 0;
1327        struct page *last_page = NULL;
1328        bool marked = false;
1329
1330        if (atomic) {
1331                last_page = last_fsync_dnode(sbi, ino);
1332                if (IS_ERR_OR_NULL(last_page))
1333                        return PTR_ERR_OR_ZERO(last_page);
1334        }
1335retry:
1336        pagevec_init(&pvec, 0);
1337        index = 0;
1338        end = ULONG_MAX;
1339
1340        while (index <= end) {
1341                int i, nr_pages;
1342                nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1343                                PAGECACHE_TAG_DIRTY,
1344                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1345                if (nr_pages == 0)
1346                        break;
1347
1348                for (i = 0; i < nr_pages; i++) {
1349                        struct page *page = pvec.pages[i];
1350
1351                        if (unlikely(f2fs_cp_error(sbi))) {
1352                                f2fs_put_page(last_page, 0);
1353                                pagevec_release(&pvec);
1354                                return -EIO;
1355                        }
1356
1357                        if (!IS_DNODE(page) || !is_cold_node(page))
1358                                continue;
1359                        if (ino_of_node(page) != ino)
1360                                continue;
1361
1362                        lock_page(page);
1363
1364                        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1365continue_unlock:
1366                                unlock_page(page);
1367                                continue;
1368                        }
1369                        if (ino_of_node(page) != ino)
1370                                goto continue_unlock;
1371
1372                        if (!PageDirty(page) && page != last_page) {
1373                                /* someone wrote it for us */
1374                                goto continue_unlock;
1375                        }
1376
1377                        f2fs_wait_on_page_writeback(page, NODE, true);
1378                        BUG_ON(PageWriteback(page));
1379
1380                        if (!atomic || page == last_page) {
1381                                set_fsync_mark(page, 1);
1382                                if (IS_INODE(page))
1383                                        set_dentry_mark(page,
1384                                                need_dentry_mark(sbi, ino));
 1385                                /* may be written by another thread */
1386                                if (!PageDirty(page))
1387                                        set_page_dirty(page);
1388                        }
1389
1390                        if (!clear_page_dirty_for_io(page))
1391                                goto continue_unlock;
1392
1393                        ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
1394                        if (ret) {
1395                                unlock_page(page);
1396                                f2fs_put_page(last_page, 0);
1397                                break;
1398                        }
1399                        if (page == last_page) {
1400                                f2fs_put_page(page, 0);
1401                                marked = true;
1402                                break;
1403                        }
1404                }
1405                pagevec_release(&pvec);
1406                cond_resched();
1407
1408                if (ret || marked)
1409                        break;
1410        }
1411        if (!ret && atomic && !marked) {
1412                f2fs_msg(sbi->sb, KERN_DEBUG,
1413                        "Retry to write fsync mark: ino=%u, idx=%lx",
1414                                        ino, last_page->index);
1415                lock_page(last_page);
1416                set_page_dirty(last_page);
1417                unlock_page(last_page);
1418                goto retry;
1419        }
 1420        return ret ? -EIO : 0;
1421}
1422
1423int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
1424{
1425        pgoff_t index, end;
1426        struct pagevec pvec;
1427        int step = 0;
1428        int nwritten = 0;
1429
1430        pagevec_init(&pvec, 0);
1431
1432next_step:
1433        index = 0;
1434        end = ULONG_MAX;
1435
1436        while (index <= end) {
1437                int i, nr_pages;
1438                nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1439                                PAGECACHE_TAG_DIRTY,
1440                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1441                if (nr_pages == 0)
1442                        break;
1443
1444                for (i = 0; i < nr_pages; i++) {
1445                        struct page *page = pvec.pages[i];
1446
1447                        if (unlikely(f2fs_cp_error(sbi))) {
1448                                pagevec_release(&pvec);
1449                                return -EIO;
1450                        }
1451
1452                        /*
1453                         * flushing sequence with step:
1454                         * 0. indirect nodes
1455                         * 1. dentry dnodes
1456                         * 2. file dnodes
1457                         */
1458                        if (step == 0 && IS_DNODE(page))
1459                                continue;
1460                        if (step == 1 && (!IS_DNODE(page) ||
1461                                                is_cold_node(page)))
1462                                continue;
1463                        if (step == 2 && (!IS_DNODE(page) ||
1464                                                !is_cold_node(page)))
1465                                continue;
1466lock_node:
1467                        if (!trylock_page(page))
1468                                continue;
1469
1470                        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1471continue_unlock:
1472                                unlock_page(page);
1473                                continue;
1474                        }
1475
1476                        if (!PageDirty(page)) {
1477                                /* someone wrote it for us */
1478                                goto continue_unlock;
1479                        }
1480
1481                        /* flush inline_data */
1482                        if (is_inline_node(page)) {
1483                                clear_inline_node(page);
1484                                unlock_page(page);
1485                                flush_inline_data(sbi, ino_of_node(page));
1486                                goto lock_node;
1487                        }
1488
1489                        f2fs_wait_on_page_writeback(page, NODE, true);
1490
1491                        BUG_ON(PageWriteback(page));
1492                        if (!clear_page_dirty_for_io(page))
1493                                goto continue_unlock;
1494
1495                        set_fsync_mark(page, 0);
1496                        set_dentry_mark(page, 0);
1497
1498                        if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
1499                                unlock_page(page);
1500
1501                        if (--wbc->nr_to_write == 0)
1502                                break;
1503                }
1504                pagevec_release(&pvec);
1505                cond_resched();
1506
1507                if (wbc->nr_to_write == 0) {
1508                        step = 2;
1509                        break;
1510                }
1511        }
1512
1513        if (step < 2) {
1514                step++;
1515                goto next_step;
1516        }
1517        return nwritten;
1518}
1519
1520int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
1521{
1522        pgoff_t index = 0, end = ULONG_MAX;
1523        struct pagevec pvec;
1524        int ret2 = 0, ret = 0;
1525
1526        pagevec_init(&pvec, 0);
1527
1528        while (index <= end) {
1529                int i, nr_pages;
1530                nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1531                                PAGECACHE_TAG_WRITEBACK,
1532                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1533                if (nr_pages == 0)
1534                        break;
1535
1536                for (i = 0; i < nr_pages; i++) {
1537                        struct page *page = pvec.pages[i];
1538
1539                        /* until radix tree lookup accepts end_index */
1540                        if (unlikely(page->index > end))
1541                                continue;
1542
1543                        if (ino && ino_of_node(page) == ino) {
1544                                f2fs_wait_on_page_writeback(page, NODE, true);
1545                                if (TestClearPageError(page))
1546                                        ret = -EIO;
1547                        }
1548                }
1549                pagevec_release(&pvec);
1550                cond_resched();
1551        }
1552
1553        if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
1554                ret2 = -ENOSPC;
1555        if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
1556                ret2 = -EIO;
1557        if (!ret)
1558                ret = ret2;
1559        return ret;
1560}
1561
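    /*
     * Descriptive note (added): write back a single dirty node page.  Pages
     * of already truncated nodes are simply cleaned, and the page is
     * redirtied while roll-forward recovery is in progress or after a
     * checkpoint error.
     */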
1562static int f2fs_write_node_page(struct page *page,
1563                                struct writeback_control *wbc)
1564{
1565        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1566        nid_t nid;
1567        struct node_info ni;
1568        struct f2fs_io_info fio = {
1569                .sbi = sbi,
1570                .type = NODE,
1571                .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
1572                .page = page,
1573                .encrypted_page = NULL,
1574        };
1575
1576        trace_f2fs_writepage(page, NODE);
1577
1578        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1579                goto redirty_out;
1580        if (unlikely(f2fs_cp_error(sbi)))
1581                goto redirty_out;
1582
1583        /* get old block addr of this node page */
1584        nid = nid_of_node(page);
1585        f2fs_bug_on(sbi, page->index != nid);
1586
1587        if (wbc->for_reclaim) {
1588                if (!down_read_trylock(&sbi->node_write))
1589                        goto redirty_out;
1590        } else {
1591                down_read(&sbi->node_write);
1592        }
1593
1594        get_node_info(sbi, nid, &ni);
1595
1596        /* This page is already truncated */
1597        if (unlikely(ni.blk_addr == NULL_ADDR)) {
1598                ClearPageUptodate(page);
1599                dec_page_count(sbi, F2FS_DIRTY_NODES);
1600                up_read(&sbi->node_write);
1601                unlock_page(page);
1602                return 0;
1603        }
1604
1605        set_page_writeback(page);
1606        fio.old_blkaddr = ni.blk_addr;
1607        write_node_page(nid, &fio);
1608        set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1609        dec_page_count(sbi, F2FS_DIRTY_NODES);
1610        up_read(&sbi->node_write);
1611
1612        if (wbc->for_reclaim)
1613                f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
1614
1615        unlock_page(page);
1616
1617        if (unlikely(f2fs_cp_error(sbi)))
1618                f2fs_submit_merged_bio(sbi, NODE, WRITE);
1619
1620        return 0;
1621
1622redirty_out:
1623        redirty_page_for_writepage(wbc, page);
1624        return AOP_WRITEPAGE_ACTIVATE;
1625}
1626
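    /*
     * Descriptive note (added): write back dirty node pages in batches;
     * small amounts of dirty pages are skipped so they can be merged into
     * a bigger flush later.
     */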
1627static int f2fs_write_node_pages(struct address_space *mapping,
1628                            struct writeback_control *wbc)
1629{
1630        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1631        long diff;
1632
1633        /* balancing f2fs's metadata in background */
1634        f2fs_balance_fs_bg(sbi);
1635
1636        /* collect a number of dirty node pages and write together */
1637        if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1638                goto skip_write;
1639
1640        trace_f2fs_writepages(mapping->host, wbc, NODE);
1641
1642        diff = nr_pages_to_write(sbi, NODE, wbc);
1643        wbc->sync_mode = WB_SYNC_NONE;
1644        sync_node_pages(sbi, wbc);
1645        wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1646        return 0;
1647
1648skip_write:
1649        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1650        trace_f2fs_writepages(mapping->host, wbc, NODE);
1651        return 0;
1652}
1653
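    /*
     * Descriptive note (added): mark a node page dirty and account it in
     * the F2FS_DIRTY_NODES counter.
     */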
1654static int f2fs_set_node_page_dirty(struct page *page)
1655{
1656        trace_f2fs_set_page_dirty(page, NODE);
1657
1658        SetPageUptodate(page);
1659        if (!PageDirty(page)) {
1660                __set_page_dirty_nobuffers(page);
1661                inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
1662                SetPagePrivate(page);
1663                f2fs_trace_pid(page);
1664                return 1;
1665        }
1666        return 0;
1667}
1668
1669/*
1670 * Structure of the f2fs node operations
1671 */
1672const struct address_space_operations f2fs_node_aops = {
1673        .writepage      = f2fs_write_node_page,
1674        .writepages     = f2fs_write_node_pages,
1675        .set_page_dirty = f2fs_set_node_page_dirty,
1676        .invalidatepage = f2fs_invalidate_page,
1677        .releasepage    = f2fs_release_page,
1678};
1679
1680static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
1681                                                nid_t n)
1682{
1683        return radix_tree_lookup(&nm_i->free_nid_root, n);
1684}
1685
1686static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
1687                                                struct free_nid *i)
1688{
1689        list_del(&i->list);
1690        radix_tree_delete(&nm_i->free_nid_root, i->nid);
1691}
1692
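    /*
     * Descriptive note (added): try to add @nid to the free nid cache.
     * Returns 1 on success, 0 if the nid is skipped (nid 0, already
     * allocated or already cached) and -1 if the cache is over its memory
     * budget.
     */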
1693static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
1694{
1695        struct f2fs_nm_info *nm_i = NM_I(sbi);
1696        struct free_nid *i;
1697        struct nat_entry *ne;
1698
1699        if (!available_free_memory(sbi, FREE_NIDS))
1700                return -1;
1701
1702        /* 0 nid should not be used */
1703        if (unlikely(nid == 0))
1704                return 0;
1705
1706        if (build) {
1707                /* do not add allocated nids */
1708                ne = __lookup_nat_cache(nm_i, nid);
1709                if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
1710                                nat_get_blkaddr(ne) != NULL_ADDR))
1711                        return 0;
1712        }
1713
1714        i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1715        i->nid = nid;
1716        i->state = NID_NEW;
1717
1718        if (radix_tree_preload(GFP_NOFS)) {
1719                kmem_cache_free(free_nid_slab, i);
1720                return 0;
1721        }
1722
1723        spin_lock(&nm_i->free_nid_list_lock);
1724        if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
1725                spin_unlock(&nm_i->free_nid_list_lock);
1726                radix_tree_preload_end();
1727                kmem_cache_free(free_nid_slab, i);
1728                return 0;
1729        }
1730        list_add_tail(&i->list, &nm_i->free_nid_list);
1731        nm_i->fcnt++;
1732        spin_unlock(&nm_i->free_nid_list_lock);
1733        radix_tree_preload_end();
1734        return 1;
1735}
1736
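    /*
     * Descriptive note (added): drop @nid from the free nid cache if it is
     * still in the NID_NEW state.
     */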
1737static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1738{
1739        struct free_nid *i;
1740        bool need_free = false;
1741
1742        spin_lock(&nm_i->free_nid_list_lock);
1743        i = __lookup_free_nid_list(nm_i, nid);
1744        if (i && i->state == NID_NEW) {
1745                __del_from_free_nid_list(nm_i, i);
1746                nm_i->fcnt--;
1747                need_free = true;
1748        }
1749        spin_unlock(&nm_i->free_nid_list_lock);
1750
1751        if (need_free)
1752                kmem_cache_free(free_nid_slab, i);
1753}
1754
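    /*
     * Descriptive note (added): scan one NAT block and add every nid whose
     * block address is NULL_ADDR (i.e. unallocated) to the free nid cache.
     */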
1755static void scan_nat_page(struct f2fs_sb_info *sbi,
1756                        struct page *nat_page, nid_t start_nid)
1757{
1758        struct f2fs_nm_info *nm_i = NM_I(sbi);
1759        struct f2fs_nat_block *nat_blk = page_address(nat_page);
1760        block_t blk_addr;
1761        int i;
1762
1763        i = start_nid % NAT_ENTRY_PER_BLOCK;
1764
1765        for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1766
1767                if (unlikely(start_nid >= nm_i->max_nid))
1768                        break;
1769
1770                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1771                f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
1772                if (blk_addr == NULL_ADDR) {
1773                        if (add_free_nid(sbi, start_nid, true) < 0)
1774                                break;
1775                }
1776        }
1777}
1778
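    /*
     * Descriptive note (added): refill the free nid cache by scanning up to
     * FREE_NID_PAGES NAT blocks starting at next_scan_nid, then reconcile
     * the result with the NAT entries kept in the current hot data journal.
     * Nothing is done if the cache already holds enough entries.
     */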
1779static void build_free_nids(struct f2fs_sb_info *sbi)
1780{
1781        struct f2fs_nm_info *nm_i = NM_I(sbi);
1782        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1783        struct f2fs_journal *journal = curseg->journal;
1784        int i = 0;
1785        nid_t nid = nm_i->next_scan_nid;
1786
1787        /* Enough entries */
1788        if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
1789                return;
1790
1791        /* readahead nat pages to be scanned */
1792        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
1793                                                        META_NAT, true);
1794
1795        down_read(&nm_i->nat_tree_lock);
1796
1797        while (1) {
1798                struct page *page = get_current_nat_page(sbi, nid);
1799
1800                scan_nat_page(sbi, page, nid);
1801                f2fs_put_page(page, 1);
1802
1803                nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
1804                if (unlikely(nid >= nm_i->max_nid))
1805                        nid = 0;
1806
1807                if (++i >= FREE_NID_PAGES)
1808                        break;
1809        }
1810
1811        /* remember where the next scan for free nids should resume */
1812        nm_i->next_scan_nid = nid;
1813
1814        /* find free nids from current sum_pages */
1815        down_read(&curseg->journal_rwsem);
1816        for (i = 0; i < nats_in_cursum(journal); i++) {
1817                block_t addr;
1818
1819                addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
1820                nid = le32_to_cpu(nid_in_journal(journal, i));
1821                if (addr == NULL_ADDR)
1822                        add_free_nid(sbi, nid, true);
1823                else
1824                        remove_free_nid(nm_i, nid);
1825        }
1826        up_read(&curseg->journal_rwsem);
1827        up_read(&nm_i->nat_tree_lock);
1828
1829        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
1830                                        nm_i->ra_nid_pages, META_NAT, false);
1831}
1832
1833/*
1834 * If this function returns success, the caller can obtain a new nid
1835 * from the second parameter of this function.
1836 * The returned nid can be used as an ino as well as a nid when an inode is created.
1837 */
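    /*
     * A minimal usage sketch (added, illustrative only; real callers pair a
     * successful alloc_nid() with alloc_nid_done(), or with
     * alloc_nid_failed() when the following steps fail):
     *
     *	nid_t nid;
     *
     *	if (!alloc_nid(sbi, &nid))
     *		return -ENOSPC;
     *	err = do_something(nid);	// hypothetical next step
     *	if (err)
     *		alloc_nid_failed(sbi, nid);
     *	else
     *		alloc_nid_done(sbi, nid);
     */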
1838bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1839{
1840        struct f2fs_nm_info *nm_i = NM_I(sbi);
1841        struct free_nid *i = NULL;
1842retry:
1843#ifdef CONFIG_F2FS_FAULT_INJECTION
1844        if (time_to_inject(FAULT_ALLOC_NID))
1845                return false;
1846#endif
1847        if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
1848                return false;
1849
1850        spin_lock(&nm_i->free_nid_list_lock);
1851
1852        /* We should not use stale free nids created by build_free_nids */
1853        if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
1854                f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
1855                list_for_each_entry(i, &nm_i->free_nid_list, list)
1856                        if (i->state == NID_NEW)
1857                                break;
1858
1859                f2fs_bug_on(sbi, i->state != NID_NEW);
1860                *nid = i->nid;
1861                i->state = NID_ALLOC;
1862                nm_i->fcnt--;
1863                spin_unlock(&nm_i->free_nid_list_lock);
1864                return true;
1865        }
1866        spin_unlock(&nm_i->free_nid_list_lock);
1867
1868        /* Let's scan nat pages and their caches to get free nids */
1869        mutex_lock(&nm_i->build_lock);
1870        build_free_nids(sbi);
1871        mutex_unlock(&nm_i->build_lock);
1872        goto retry;
1873}
1874
1875/*
1876 * alloc_nid() should be called prior to this function.
1877 */
1878void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1879{
1880        struct f2fs_nm_info *nm_i = NM_I(sbi);
1881        struct free_nid *i;
1882
1883        spin_lock(&nm_i->free_nid_list_lock);
1884        i = __lookup_free_nid_list(nm_i, nid);
1885        f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
1886        __del_from_free_nid_list(nm_i, i);
1887        spin_unlock(&nm_i->free_nid_list_lock);
1888
1889        kmem_cache_free(free_nid_slab, i);
1890}
1891
1892/*
1893 * alloc_nid() should be called prior to this function.
1894 */
1895void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1896{
1897        struct f2fs_nm_info *nm_i = NM_I(sbi);
1898        struct free_nid *i;
1899        bool need_free = false;
1900
1901        if (!nid)
1902                return;
1903
1904        spin_lock(&nm_i->free_nid_list_lock);
1905        i = __lookup_free_nid_list(nm_i, nid);
1906        f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
1907        if (!available_free_memory(sbi, FREE_NIDS)) {
1908                __del_from_free_nid_list(nm_i, i);
1909                need_free = true;
1910        } else {
1911                i->state = NID_NEW;
1912                nm_i->fcnt++;
1913        }
1914        spin_unlock(&nm_i->free_nid_list_lock);
1915
1916        if (need_free)
1917                kmem_cache_free(free_nid_slab, i);
1918}
1919
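    /*
     * Descriptive note (added): shrinker helper that frees up to @nr_shrink
     * cached free nids (never the ones already handed out) and returns how
     * many were released.
     */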
1920int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
1921{
1922        struct f2fs_nm_info *nm_i = NM_I(sbi);
1923        struct free_nid *i, *next;
1924        int nr = nr_shrink;
1925
1926        if (!mutex_trylock(&nm_i->build_lock))
1927                return 0;
1928
1929        spin_lock(&nm_i->free_nid_list_lock);
1930        list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
1931                if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
1932                        break;
1933                if (i->state == NID_ALLOC)
1934                        continue;
1935                __del_from_free_nid_list(nm_i, i);
1936                kmem_cache_free(free_nid_slab, i);
1937                nm_i->fcnt--;
1938                nr_shrink--;
1939        }
1940        spin_unlock(&nm_i->free_nid_list_lock);
1941        mutex_unlock(&nm_i->build_lock);
1942
1943        return nr - nr_shrink;
1944}
1945
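    /*
     * Descriptive note (added): copy the inline xattr area of a recovered
     * node page back into the on-disk inode page (used by roll-forward
     * recovery).
     */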
1946void recover_inline_xattr(struct inode *inode, struct page *page)
1947{
1948        void *src_addr, *dst_addr;
1949        size_t inline_size;
1950        struct page *ipage;
1951        struct f2fs_inode *ri;
1952
1953        ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
1954        f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
1955
1956        ri = F2FS_INODE(page);
1957        if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
1958                clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
1959                goto update_inode;
1960        }
1961
1962        dst_addr = inline_xattr_addr(ipage);
1963        src_addr = inline_xattr_addr(page);
1964        inline_size = inline_xattr_size(inode);
1965
1966        f2fs_wait_on_page_writeback(ipage, NODE, true);
1967        memcpy(dst_addr, src_addr, inline_size);
1968update_inode:
1969        update_inode(inode, ipage);
1970        f2fs_put_page(ipage, 1);
1971}
1972
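    /*
     * Descriptive note (added): rebuild the xattr node of an inode during
     * recovery by invalidating the old xattr nid, reusing the nid found in
     * the recovered page and binding it to @blkaddr.
     */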
1973void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
1974{
1975        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1976        nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
1977        nid_t new_xnid = nid_of_node(page);
1978        struct node_info ni;
1979
1980        /* 1: invalidate the previous xattr nid */
1981        if (!prev_xnid)
1982                goto recover_xnid;
1983
1984        /* Deallocate node address */
1985        get_node_info(sbi, prev_xnid, &ni);
1986        f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
1987        invalidate_blocks(sbi, ni.blk_addr);
1988        dec_valid_node_count(sbi, inode);
1989        set_node_addr(sbi, &ni, NULL_ADDR, false);
1990
1991recover_xnid:
1992        /* 2: allocate new xattr nid */
1993        if (unlikely(!inc_valid_node_count(sbi, inode)))
1994                f2fs_bug_on(sbi, 1);
1995
1996        remove_free_nid(NM_I(sbi), new_xnid);
1997        get_node_info(sbi, new_xnid, &ni);
1998        ni.ino = inode->i_ino;
1999        set_node_addr(sbi, &ni, NEW_ADDR, false);
2000        F2FS_I(inode)->i_xattr_nid = new_xnid;
2001
2002        /* 3: update xattr blkaddr */
2003        refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
2004        set_node_addr(sbi, &ni, blkaddr, false);
2005
2006        update_inode_page(inode);
2007}
2008
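    /*
     * Descriptive note (added): build a fresh inode page for @ino from a
     * recovered node page; only the static inode fields up to i_ext are
     * copied, while size, blocks, links and the xattr nid are reset.
     */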
2009int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2010{
2011        struct f2fs_inode *src, *dst;
2012        nid_t ino = ino_of_node(page);
2013        struct node_info old_ni, new_ni;
2014        struct page *ipage;
2015
2016        get_node_info(sbi, ino, &old_ni);
2017
2018        if (unlikely(old_ni.blk_addr != NULL_ADDR))
2019                return -EINVAL;
2020
2021        ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2022        if (!ipage)
2023                return -ENOMEM;
2024
2025        /* Should not use this inode from free nid list */
2026        remove_free_nid(NM_I(sbi), ino);
2027
2028        SetPageUptodate(ipage);
2029        fill_node_footer(ipage, ino, ino, 0, true);
2030
2031        src = F2FS_INODE(page);
2032        dst = F2FS_INODE(ipage);
2033
2034        memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2035        dst->i_size = 0;
2036        dst->i_blocks = cpu_to_le64(1);
2037        dst->i_links = cpu_to_le32(1);
2038        dst->i_xattr_nid = 0;
2039        dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
2040
2041        new_ni = old_ni;
2042        new_ni.ino = ino;
2043
2044        if (unlikely(!inc_valid_node_count(sbi, NULL)))
2045                WARN_ON(1);
2046        set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2047        inc_valid_inode_count(sbi);
2048        set_page_dirty(ipage);
2049        f2fs_put_page(ipage, 1);
2050        return 0;
2051}
2052
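    /*
     * Descriptive note (added): rebuild the node summary block for @segno
     * by reading every block of the segment and recording the nid stored
     * in each node footer.
     */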
2053int restore_node_summary(struct f2fs_sb_info *sbi,
2054                        unsigned int segno, struct f2fs_summary_block *sum)
2055{
2056        struct f2fs_node *rn;
2057        struct f2fs_summary *sum_entry;
2058        block_t addr;
2059        int bio_blocks = MAX_BIO_BLOCKS(sbi);
2060        int i, idx, last_offset, nrpages;
2061
2062        /* scan the node segment */
2063        last_offset = sbi->blocks_per_seg;
2064        addr = START_BLOCK(sbi, segno);
2065        sum_entry = &sum->entries[0];
2066
2067        for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2068                nrpages = min(last_offset - i, bio_blocks);
2069
2070                /* readahead node pages */
2071                ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2072
2073                for (idx = addr; idx < addr + nrpages; idx++) {
2074                        struct page *page = get_tmp_page(sbi, idx);
2075
2076                        rn = F2FS_NODE(page);
2077                        sum_entry->nid = rn->footer.nid;
2078                        sum_entry->version = 0;
2079                        sum_entry->ofs_in_node = 0;
2080                        sum_entry++;
2081                        f2fs_put_page(page, 1);
2082                }
2083
2084                invalidate_mapping_pages(META_MAPPING(sbi), addr,
2085                                                        addr + nrpages);
2086        }
2087        return 0;
2088}
2089
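    /*
     * Descriptive note (added): move all NAT entries cached in the current
     * hot data journal into the dirty NAT cache so they can be flushed
     * together with the other dirty entries.
     */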
2090static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2091{
2092        struct f2fs_nm_info *nm_i = NM_I(sbi);
2093        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2094        struct f2fs_journal *journal = curseg->journal;
2095        int i;
2096
2097        down_write(&curseg->journal_rwsem);
2098        for (i = 0; i < nats_in_cursum(journal); i++) {
2099                struct nat_entry *ne;
2100                struct f2fs_nat_entry raw_ne;
2101                nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2102
2103                raw_ne = nat_in_journal(journal, i);
2104
2105                ne = __lookup_nat_cache(nm_i, nid);
2106                if (!ne) {
2107                        ne = grab_nat_entry(nm_i, nid);
2108                        node_info_from_raw_nat(&ne->ni, &raw_ne);
2109                }
2110                __set_nat_cache_dirty(nm_i, ne);
2111        }
2112        update_nats_in_cursum(journal, -i);
2113        up_write(&curseg->journal_rwsem);
2114}
2115
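    /*
     * Descriptive note (added): insert @nes into @head keeping the list
     * sorted by entry count, so small sets are flushed to the journal
     * first; sets with at least @max entries go straight to the tail.
     */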
2116static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2117                                                struct list_head *head, int max)
2118{
2119        struct nat_entry_set *cur;
2120
2121        if (nes->entry_cnt >= max)
2122                goto add_out;
2123
2124        list_for_each_entry(cur, head, set_list) {
2125                if (cur->entry_cnt >= nes->entry_cnt) {
2126                        list_add(&nes->set_list, cur->set_list.prev);
2127                        return;
2128                }
2129        }
2130add_out:
2131        list_add_tail(&nes->set_list, head);
2132}
2133
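    /*
     * Descriptive note (added): write one dirty NAT entry set either into
     * the hot data journal (if it still has room) or into its NAT block,
     * re-adding freed nids to the free nid cache on the way.
     */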
2134static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2135                                        struct nat_entry_set *set)
2136{
2137        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2138        struct f2fs_journal *journal = curseg->journal;
2139        nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2140        bool to_journal = true;
2141        struct f2fs_nat_block *nat_blk;
2142        struct nat_entry *ne, *cur;
2143        struct page *page = NULL;
2144
2145        /*
2146         * there are two steps to flush nat entries:
2147         * #1, flush nat entries to journal in current hot data summary block.
2148         * #2, flush nat entries to nat page.
2149         */
2150        if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2151                to_journal = false;
2152
2153        if (to_journal) {
2154                down_write(&curseg->journal_rwsem);
2155        } else {
2156                page = get_next_nat_page(sbi, start_nid);
2157                nat_blk = page_address(page);
2158                f2fs_bug_on(sbi, !nat_blk);
2159        }
2160
2161        /* flush dirty nats in nat entry set */
2162        list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2163                struct f2fs_nat_entry *raw_ne;
2164                nid_t nid = nat_get_nid(ne);
2165                int offset;
2166
2167                if (nat_get_blkaddr(ne) == NEW_ADDR)
2168                        continue;
2169
2170                if (to_journal) {
2171                        offset = lookup_journal_in_cursum(journal,
2172                                                        NAT_JOURNAL, nid, 1);
2173                        f2fs_bug_on(sbi, offset < 0);
2174                        raw_ne = &nat_in_journal(journal, offset);
2175                        nid_in_journal(journal, offset) = cpu_to_le32(nid);
2176                } else {
2177                        raw_ne = &nat_blk->entries[nid - start_nid];
2178                }
2179                raw_nat_from_node_info(raw_ne, &ne->ni);
2180                nat_reset_flag(ne);
2181                __clear_nat_cache_dirty(NM_I(sbi), ne);
2182                if (nat_get_blkaddr(ne) == NULL_ADDR)
2183                        add_free_nid(sbi, nid, false);
2184        }
2185
2186        if (to_journal)
2187                up_write(&curseg->journal_rwsem);
2188        else
2189                f2fs_put_page(page, 1);
2190
2191        f2fs_bug_on(sbi, set->entry_cnt);
2192
2193        radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2194        kmem_cache_free(nat_entry_set_slab, set);
2195}
2196
2197/*
2198 * This function is called during the checkpointing process.
2199 */
2200void flush_nat_entries(struct f2fs_sb_info *sbi)
2201{
2202        struct f2fs_nm_info *nm_i = NM_I(sbi);
2203        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2204        struct f2fs_journal *journal = curseg->journal;
2205        struct nat_entry_set *setvec[SETVEC_SIZE];
2206        struct nat_entry_set *set, *tmp;
2207        unsigned int found;
2208        nid_t set_idx = 0;
2209        LIST_HEAD(sets);
2210
2211        if (!nm_i->dirty_nat_cnt)
2212                return;
2213
2214        down_write(&nm_i->nat_tree_lock);
2215
2216        /*
2217         * if there is not enough space in the journal to store dirty nat
2218         * entries, remove all entries from the journal and merge them
2219         * into nat entry sets.
2220         */
2221        if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2222                remove_nats_in_journal(sbi);
2223
2224        while ((found = __gang_lookup_nat_set(nm_i,
2225                                        set_idx, SETVEC_SIZE, setvec))) {
2226                unsigned idx;
2227                set_idx = setvec[found - 1]->set + 1;
2228                for (idx = 0; idx < found; idx++)
2229                        __adjust_nat_entry_set(setvec[idx], &sets,
2230                                                MAX_NAT_JENTRIES(journal));
2231        }
2232
2233        /* flush dirty nats in nat entry set */
2234        list_for_each_entry_safe(set, tmp, &sets, set_list)
2235                __flush_nat_entry_set(sbi, set);
2236
2237        up_write(&nm_i->nat_tree_lock);
2238
2239        f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
2240}
2241
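    /*
     * Descriptive note (added): initialize the node manager from the raw
     * superblock, computing the nid range from the NAT area size and
     * setting up the free nid and NAT caches.
     */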
2242static int init_node_manager(struct f2fs_sb_info *sbi)
2243{
2244        struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2245        struct f2fs_nm_info *nm_i = NM_I(sbi);
2246        unsigned char *version_bitmap;
2247        unsigned int nat_segs, nat_blocks;
2248
2249        nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2250
2251        /* segment_count_nat includes pair segment so divide by 2. */
2252        nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2253        nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2254
2255        nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
2256
2257        /* unused nids: 0, node, meta, (and root counted as valid node) */
2258        nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
2259        nm_i->fcnt = 0;
2260        nm_i->nat_cnt = 0;
2261        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2262        nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2263        nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2264
2265        INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2266        INIT_LIST_HEAD(&nm_i->free_nid_list);
2267        INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2268        INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2269        INIT_LIST_HEAD(&nm_i->nat_entries);
2270
2271        mutex_init(&nm_i->build_lock);
2272        spin_lock_init(&nm_i->free_nid_list_lock);
2273        init_rwsem(&nm_i->nat_tree_lock);
2274
2275        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2276        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2277        version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2278        if (!version_bitmap)
2279                return -EFAULT;
2280
2281        nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2282                                        GFP_KERNEL);
2283        if (!nm_i->nat_bitmap)
2284                return -ENOMEM;
2285        return 0;
2286}
2287
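    /*
     * Descriptive note (added): allocate and initialize the node manager,
     * then prime the free nid cache.
     */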
2288int build_node_manager(struct f2fs_sb_info *sbi)
2289{
2290        int err;
2291
2292        sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
2293        if (!sbi->nm_info)
2294                return -ENOMEM;
2295
2296        err = init_node_manager(sbi);
2297        if (err)
2298                return err;
2299
2300        build_free_nids(sbi);
2301        return 0;
2302}
2303
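    /*
     * Descriptive note (added): tear down the node manager, releasing the
     * free nid list, the NAT cache, the NAT set cache and the NAT bitmap.
     */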
2304void destroy_node_manager(struct f2fs_sb_info *sbi)
2305{
2306        struct f2fs_nm_info *nm_i = NM_I(sbi);
2307        struct free_nid *i, *next_i;
2308        struct nat_entry *natvec[NATVEC_SIZE];
2309        struct nat_entry_set *setvec[SETVEC_SIZE];
2310        nid_t nid = 0;
2311        unsigned int found;
2312
2313        if (!nm_i)
2314                return;
2315
2316        /* destroy free nid list */
2317        spin_lock(&nm_i->free_nid_list_lock);
2318        list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
2319                f2fs_bug_on(sbi, i->state == NID_ALLOC);
2320                __del_from_free_nid_list(nm_i, i);
2321                nm_i->fcnt--;
2322                spin_unlock(&nm_i->free_nid_list_lock);
2323                kmem_cache_free(free_nid_slab, i);
2324                spin_lock(&nm_i->free_nid_list_lock);
2325        }
2326        f2fs_bug_on(sbi, nm_i->fcnt);
2327        spin_unlock(&nm_i->free_nid_list_lock);
2328
2329        /* destroy nat cache */
2330        down_write(&nm_i->nat_tree_lock);
2331        while ((found = __gang_lookup_nat_cache(nm_i,
2332                                        nid, NATVEC_SIZE, natvec))) {
2333                unsigned idx;
2334
2335                nid = nat_get_nid(natvec[found - 1]) + 1;
2336                for (idx = 0; idx < found; idx++)
2337                        __del_from_nat_cache(nm_i, natvec[idx]);
2338        }
2339        f2fs_bug_on(sbi, nm_i->nat_cnt);
2340
2341        /* destroy nat set cache */
2342        nid = 0;
2343        while ((found = __gang_lookup_nat_set(nm_i,
2344                                        nid, SETVEC_SIZE, setvec))) {
2345                unsigned idx;
2346
2347                nid = setvec[found - 1]->set + 1;
2348                for (idx = 0; idx < found; idx++) {
2349                        /* entry_cnt is not zero if a cp_error occurred */
2350                        f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
2351                        radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
2352                        kmem_cache_free(nat_entry_set_slab, setvec[idx]);
2353                }
2354        }
2355        up_write(&nm_i->nat_tree_lock);
2356
2357        kfree(nm_i->nat_bitmap);
2358        sbi->nm_info = NULL;
2359        kfree(nm_i);
2360}
2361
2362int __init create_node_manager_caches(void)
2363{
2364        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
2365                        sizeof(struct nat_entry));
2366        if (!nat_entry_slab)
2367                goto fail;
2368
2369        free_nid_slab = f2fs_kmem_cache_create("free_nid",
2370                        sizeof(struct free_nid));
2371        if (!free_nid_slab)
2372                goto destroy_nat_entry;
2373
2374        nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
2375                        sizeof(struct nat_entry_set));
2376        if (!nat_entry_set_slab)
2377                goto destroy_free_nid;
2378        return 0;
2379
2380destroy_free_nid:
2381        kmem_cache_destroy(free_nid_slab);
2382destroy_nat_entry:
2383        kmem_cache_destroy(nat_entry_slab);
2384fail:
2385        return -ENOMEM;
2386}
2387
2388void destroy_node_manager_caches(void)
2389{
2390        kmem_cache_destroy(nat_entry_set_slab);
2391        kmem_cache_destroy(free_nid_slab);
2392        kmem_cache_destroy(nat_entry_slab);
2393}
2394