linux/fs/f2fs/node.c
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
        unsigned long flags;

        if (PageDirty(page)) {
                spin_lock_irqsave(&mapping->tree_lock, flags);
                radix_tree_tag_clear(&mapping->page_tree,
                                page_index(page),
                                PAGECACHE_TAG_DIRTY);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);

                clear_page_dirty_for_io(page);
                dec_page_count(sbi, F2FS_DIRTY_NODES);
        }
        ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        pgoff_t index = current_nat_addr(sbi, nid);
        return get_meta_page(sbi, index);
}

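/*
 * Each NAT block has two on-disk slots in the meta area; a bit in
 * nm_i->nat_bitmap selects which slot is current.  get_next_nat_page()
 * copies the current slot into the other one and flips that bit
 * (set_to_next_nat), so the dirtied copy is the one written out by the
 * next checkpoint while the old copy stays intact for recovery.
 */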
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct page *src_page;
        struct page *dst_page;
        pgoff_t src_off;
        pgoff_t dst_off;
        void *src_addr;
        void *dst_addr;
        struct f2fs_nm_info *nm_i = NM_I(sbi);

        src_off = current_nat_addr(sbi, nid);
        dst_off = next_nat_addr(sbi, src_off);

        /* get current nat block page with lock */
        src_page = get_meta_page(sbi, src_off);

        /* Dirty src_page means that it is already the new target NAT page. */
        if (PageDirty(src_page))
                return src_page;

        dst_page = grab_meta_page(sbi, dst_off);

        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
        memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);

        set_to_next_nat(nm_i, nid);

        return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct blk_plug plug;
        struct page *page;
        pgoff_t index;
        int i;

        blk_start_plug(&plug);

        for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
                if (nid >= nm_i->max_nid)
                        nid = 0;
                index = current_nat_addr(sbi, nid);

                page = grab_cache_page(mapping, index);
                if (!page)
                        continue;
                if (PageUptodate(page)) {
                        f2fs_put_page(page, 1);
                        continue;
                }
                if (f2fs_readpage(sbi, page, index, READ))
                        continue;

                f2fs_put_page(page, 0);
        }
        blk_finish_plug(&plug);
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
        return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
                nid_t start, unsigned int nr, struct nat_entry **ep)
{
        return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
        list_del(&e->list);
        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
        nm_i->nat_cnt--;
        kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        int is_cp = 1;

        read_lock(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e && !e->checkpointed)
                is_cp = 0;
        read_unlock(&nm_i->nat_tree_lock);
        return is_cp;
}

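/*
 * grab_nat_entry() runs under the nat_tree_lock write lock, so it must not
 * sleep: it allocates with GFP_ATOMIC and simply returns NULL on failure.
 * Callers (cache_nat_entry(), set_node_addr(), flush_nats_in_journal())
 * drop the lock and retry instead of blocking inside it.
 */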
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
        struct nat_entry *new;

        new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
        if (!new)
                return NULL;
        if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
                kmem_cache_free(nat_entry_slab, new);
                return NULL;
        }
        memset(new, 0, sizeof(struct nat_entry));
        nat_set_nid(new, nid);
        list_add_tail(&new->list, &nm_i->nat_entries);
        nm_i->nat_cnt++;
        return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
                                                struct f2fs_nat_entry *ne)
{
        struct nat_entry *e;
retry:
        write_lock(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (!e) {
                e = grab_nat_entry(nm_i, nid);
                if (!e) {
                        write_unlock(&nm_i->nat_tree_lock);
                        goto retry;
                }
                nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
                nat_set_ino(e, le32_to_cpu(ne->ino));
                nat_set_version(e, ne->version);
                e->checkpointed = true;
        }
        write_unlock(&nm_i->nat_tree_lock);
}

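/*
 * A node address moves through NULL_ADDR (free) -> NEW_ADDR (allocated but
 * not yet written) -> a real block address (written) -> NULL_ADDR again
 * (truncated, with the version number bumped).  The BUG_ON()s below check
 * exactly these transitions.
 */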
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                        block_t new_blkaddr)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
retry:
        write_lock(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ni->nid);
        if (!e) {
                e = grab_nat_entry(nm_i, ni->nid);
                if (!e) {
                        write_unlock(&nm_i->nat_tree_lock);
                        goto retry;
                }
                e->ni = *ni;
                e->checkpointed = true;
                BUG_ON(ni->blk_addr == NEW_ADDR);
        } else if (new_blkaddr == NEW_ADDR) {
                /*
                 * when a nid is reallocated, the previous nat entry
                 * can remain in the nat cache.
                 * So, reinitialize it with new information.
                 */
                e->ni = *ni;
                BUG_ON(ni->blk_addr != NULL_ADDR);
        }

        if (new_blkaddr == NEW_ADDR)
                e->checkpointed = false;

        /* sanity check */
        BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
        BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
                        new_blkaddr == NULL_ADDR);
        BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
                        new_blkaddr == NEW_ADDR);
        BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
                        nat_get_blkaddr(e) != NULL_ADDR &&
                        new_blkaddr == NEW_ADDR);

        /* increment version no as the node is removed */
        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
                unsigned char version = nat_get_version(e);
                nat_set_version(e, inc_node_version(version));
        }

        /* change address */
        nat_set_blkaddr(e, new_blkaddr);
        __set_nat_cache_dirty(nm_i, e);
        write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);

        if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
                return 0;

        write_lock(&nm_i->nat_tree_lock);
        while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
                struct nat_entry *ne;
                ne = list_first_entry(&nm_i->nat_entries,
                                        struct nat_entry, list);
                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;
        }
        write_unlock(&nm_i->nat_tree_lock);
        return nr_shrink;
}

/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_summary_block *sum = curseg->sum_blk;
        nid_t start_nid = START_NID(nid);
        struct f2fs_nat_block *nat_blk;
        struct page *page = NULL;
        struct f2fs_nat_entry ne;
        struct nat_entry *e;
        int i;

        memset(&ne, 0, sizeof(struct f2fs_nat_entry));
        ni->nid = nid;

        /* Check nat cache */
        read_lock(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                ni->ino = nat_get_ino(e);
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
        }
        read_unlock(&nm_i->nat_tree_lock);
        if (e)
                return;

        /* Check current segment summary */
        mutex_lock(&curseg->curseg_mutex);
        i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
        if (i >= 0) {
                ne = nat_in_journal(sum, i);
                node_info_from_raw_nat(ni, &ne);
        }
        mutex_unlock(&curseg->curseg_mutex);
        if (i >= 0)
                goto cache;

        /* Fill node_info from nat page */
        page = get_current_nat_page(sbi, start_nid);
        nat_blk = (struct f2fs_nat_block *)page_address(page);
        ne = nat_blk->entries[nid - start_nid];
        node_info_from_raw_nat(ni, &ne);
        f2fs_put_page(page, 1);
cache:
        /* cache nat entry */
        cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
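/*
 * For example, assuming the usual 4KB-block constants
 * (ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018),
 * logical block 1000 lands in the first direct node: 1000 - 923 = 77,
 * so level is 1, offset[] = {NODE_DIR1_BLOCK, 77} and noffset[1] = 1.
 * Blocks below 923 stay in the inode itself (level 0).
 */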
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
        const long direct_index = ADDRS_PER_INODE;
        const long direct_blks = ADDRS_PER_BLOCK;
        const long dptrs_per_blk = NIDS_PER_BLOCK;
        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
        const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
        int n = 0;
        int level = 0;

        noffset[0] = 0;

        if (block < direct_index) {
                offset[n] = block;
                goto got;
        }
        block -= direct_index;
        if (block < direct_blks) {
                offset[n++] = NODE_DIR1_BLOCK;
                noffset[n] = 1;
                offset[n] = block;
                level = 1;
                goto got;
        }
        block -= direct_blks;
        if (block < direct_blks) {
                offset[n++] = NODE_DIR2_BLOCK;
                noffset[n] = 2;
                offset[n] = block;
                level = 1;
                goto got;
        }
        block -= direct_blks;
        if (block < indirect_blks) {
                offset[n++] = NODE_IND1_BLOCK;
                noffset[n] = 3;
                offset[n++] = block / direct_blks;
                noffset[n] = 4 + offset[n - 1];
                offset[n] = block % direct_blks;
                level = 2;
                goto got;
        }
        block -= indirect_blks;
        if (block < indirect_blks) {
                offset[n++] = NODE_IND2_BLOCK;
                noffset[n] = 4 + dptrs_per_blk;
                offset[n++] = block / direct_blks;
                noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
                offset[n] = block % direct_blks;
                level = 2;
                goto got;
        }
        block -= indirect_blks;
        if (block < dindirect_blks) {
                offset[n++] = NODE_DIND_BLOCK;
                noffset[n] = 5 + (dptrs_per_blk * 2);
                offset[n++] = block / indirect_blks;
                noffset[n] = 6 + (dptrs_per_blk * 2) +
                              offset[n - 1] * (dptrs_per_blk + 1);
                offset[n++] = (block / direct_blks) % dptrs_per_blk;
                noffset[n] = 7 + (dptrs_per_blk * 2) +
                              offset[n - 2] * (dptrs_per_blk + 1) +
                              offset[n - 1];
                offset[n] = block % direct_blks;
                level = 3;
                goto got;
        } else {
                BUG();
        }
got:
        return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op() only if mode is set to ALLOC_NODE.
 * In the lookup modes (LOOKUP_NODE*), we don't need to care about the mutex.
 */
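/*
 * A typical lookup-side caller looks roughly like this (a sketch, with
 * error handling abbreviated):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 *	f2fs_put_dnode(&dn);
 */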
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct page *npage[4];
        struct page *parent;
        int offset[4];
        unsigned int noffset[4];
        nid_t nids[4];
        int level, i;
        int err = 0;

        level = get_node_path(index, offset, noffset);

        nids[0] = dn->inode->i_ino;
        npage[0] = get_node_page(sbi, nids[0]);
        if (IS_ERR(npage[0]))
                return PTR_ERR(npage[0]);

        parent = npage[0];
        if (level != 0)
                nids[1] = get_nid(parent, offset[0], true);
        dn->inode_page = npage[0];
        dn->inode_page_locked = true;

        /* get indirect or direct nodes */
        for (i = 1; i <= level; i++) {
                bool done = false;

                if (!nids[i] && mode == ALLOC_NODE) {
                        /* alloc new node */
                        if (!alloc_nid(sbi, &(nids[i]))) {
                                err = -ENOSPC;
                                goto release_pages;
                        }

                        dn->nid = nids[i];
                        npage[i] = new_node_page(dn, noffset[i]);
                        if (IS_ERR(npage[i])) {
                                alloc_nid_failed(sbi, nids[i]);
                                err = PTR_ERR(npage[i]);
                                goto release_pages;
                        }

                        set_nid(parent, offset[i - 1], nids[i], i == 1);
                        alloc_nid_done(sbi, nids[i]);
                        done = true;
                } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
                        npage[i] = get_node_page_ra(parent, offset[i - 1]);
                        if (IS_ERR(npage[i])) {
                                err = PTR_ERR(npage[i]);
                                goto release_pages;
                        }
                        done = true;
                }
                if (i == 1) {
                        dn->inode_page_locked = false;
                        unlock_page(parent);
                } else {
                        f2fs_put_page(parent, 1);
                }

                if (!done) {
                        npage[i] = get_node_page(sbi, nids[i]);
                        if (IS_ERR(npage[i])) {
                                err = PTR_ERR(npage[i]);
                                f2fs_put_page(npage[0], 0);
                                goto release_out;
                        }
                }
                if (i < level) {
                        parent = npage[i];
                        nids[i + 1] = get_nid(parent, offset[i], false);
                }
        }
        dn->nid = nids[level];
        dn->ofs_in_node = offset[level];
        dn->node_page = npage[level];
        dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
        return 0;

release_pages:
        f2fs_put_page(parent, 1);
        if (i > 1)
                f2fs_put_page(npage[0], 0);
release_out:
        dn->inode_page = NULL;
        dn->node_page = NULL;
        return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct node_info ni;

        get_node_info(sbi, dn->nid, &ni);
        if (dn->inode->i_blocks == 0) {
                BUG_ON(ni.blk_addr != NULL_ADDR);
                goto invalidate;
        }
        BUG_ON(ni.blk_addr == NULL_ADDR);

        /* Deallocate node address */
        invalidate_blocks(sbi, ni.blk_addr);
        dec_valid_node_count(sbi, dn->inode, 1);
        set_node_addr(sbi, &ni, NULL_ADDR);

        if (dn->nid == dn->inode->i_ino) {
                remove_orphan_inode(sbi, dn->nid);
                dec_valid_inode_count(sbi);
        } else {
                sync_inode_page(dn);
        }
invalidate:
        clear_node_page_dirty(dn->node_page);
        F2FS_SET_SB_DIRT(sbi);

        f2fs_put_page(dn->node_page, 1);
        dn->node_page = NULL;
        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct page *page;

        if (dn->nid == 0)
                return 1;

        /* get direct node */
        page = get_node_page(sbi, dn->nid);
        if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
                return 1;
        else if (IS_ERR(page))
                return PTR_ERR(page);

        /* set up dnode_of_data for truncate_node() */
        dn->node_page = page;
        dn->ofs_in_node = 0;
        truncate_data_blocks(dn);
        truncate_node(dn);
        return 1;
}

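/*
 * truncate_nodes() returns the number of node blocks it freed, or a
 * negative errno.  A return of NIDS_PER_BLOCK + 1 means the whole
 * (in)direct node and all of its children were freed, which lets the
 * caller advance its node-offset accounting in one step.
 */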
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                                                int ofs, int depth)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct dnode_of_data rdn = *dn;
        struct page *page;
        struct f2fs_node *rn;
        nid_t child_nid;
        unsigned int child_nofs;
        int freed = 0;
        int i, ret;

        if (dn->nid == 0)
                return NIDS_PER_BLOCK + 1;

        trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

        page = get_node_page(sbi, dn->nid);
        if (IS_ERR(page)) {
                trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
                return PTR_ERR(page);
        }

        rn = (struct f2fs_node *)page_address(page);
        if (depth < 3) {
                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
                        if (child_nid == 0)
                                continue;
                        rdn.nid = child_nid;
                        ret = truncate_dnode(&rdn);
                        if (ret < 0)
                                goto out_err;
                        set_nid(page, i, 0, false);
                }
        } else {
                child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
                for (i = ofs; i < NIDS_PER_BLOCK; i++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
                        if (child_nid == 0) {
                                child_nofs += NIDS_PER_BLOCK + 1;
                                continue;
                        }
                        rdn.nid = child_nid;
                        ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
                        if (ret == (NIDS_PER_BLOCK + 1)) {
                                set_nid(page, i, 0, false);
                                child_nofs += ret;
                        } else if (ret < 0 && ret != -ENOENT) {
                                goto out_err;
                        }
                }
                freed = child_nofs;
        }

        if (!ofs) {
                /* remove current indirect node */
                dn->node_page = page;
                truncate_node(dn);
                freed++;
        } else {
                f2fs_put_page(page, 1);
        }
        trace_f2fs_truncate_nodes_exit(dn->inode, freed);
        return freed;

out_err:
        f2fs_put_page(page, 1);
        trace_f2fs_truncate_nodes_exit(dn->inode, ret);
        return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
                        struct f2fs_inode *ri, int *offset, int depth)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct page *pages[2];
        nid_t nid[3];
        nid_t child_nid;
        int err = 0;
        int i;
        int idx = depth - 2;

        nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
        if (!nid[0])
                return 0;

        /* get indirect nodes in the path */
        for (i = 0; i < depth - 1; i++) {
                /* reference count will be increased */
                pages[i] = get_node_page(sbi, nid[i]);
                if (IS_ERR(pages[i])) {
                        depth = i + 1;
                        err = PTR_ERR(pages[i]);
                        goto fail;
                }
                nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
        }

        /* free direct nodes linked to a partial indirect node */
        for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
                child_nid = get_nid(pages[idx], i, false);
                if (!child_nid)
                        continue;
                dn->nid = child_nid;
                err = truncate_dnode(dn);
                if (err < 0)
                        goto fail;
                set_nid(pages[idx], i, 0, false);
        }

        if (offset[depth - 1] == 0) {
                dn->node_page = pages[idx];
                dn->nid = nid[idx];
                truncate_node(dn);
        } else {
                f2fs_put_page(pages[idx], 1);
        }
        offset[idx]++;
        offset[depth - 1] = 0;
fail:
        for (i = depth - 3; i >= 0; i--)
                f2fs_put_page(pages[i], 1);

        trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

        return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *node_mapping = sbi->node_inode->i_mapping;
        int err = 0, cont = 1;
        int level, offset[4], noffset[4];
        unsigned int nofs = 0;
        struct f2fs_node *rn;
        struct dnode_of_data dn;
        struct page *page;

        trace_f2fs_truncate_inode_blocks_enter(inode, from);

        level = get_node_path(from, offset, noffset);
restart:
        page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(page)) {
                trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
                return PTR_ERR(page);
        }

        set_new_dnode(&dn, inode, page, NULL, 0);
        unlock_page(page);

        rn = page_address(page);
        switch (level) {
        case 0:
        case 1:
                nofs = noffset[1];
                break;
        case 2:
                nofs = noffset[1];
                if (!offset[level - 1])
                        goto skip_partial;
                err = truncate_partial_nodes(&dn, &rn->i, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                nofs += 1 + NIDS_PER_BLOCK;
                break;
        case 3:
                nofs = 5 + 2 * NIDS_PER_BLOCK;
                if (!offset[level - 1])
                        goto skip_partial;
                err = truncate_partial_nodes(&dn, &rn->i, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                break;
        default:
                BUG();
        }

skip_partial:
        while (cont) {
                dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
                switch (offset[0]) {
                case NODE_DIR1_BLOCK:
                case NODE_DIR2_BLOCK:
                        err = truncate_dnode(&dn);
                        break;

                case NODE_IND1_BLOCK:
                case NODE_IND2_BLOCK:
                        err = truncate_nodes(&dn, nofs, offset[1], 2);
                        break;

                case NODE_DIND_BLOCK:
                        err = truncate_nodes(&dn, nofs, offset[1], 3);
                        cont = 0;
                        break;

                default:
                        BUG();
                }
                if (err < 0 && err != -ENOENT)
                        goto fail;
                if (offset[1] == 0 &&
                                rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
                        lock_page(page);
                        if (page->mapping != node_mapping) {
                                f2fs_put_page(page, 1);
                                goto restart;
                        }
                        wait_on_page_writeback(page);
                        rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
                        set_page_dirty(page);
                        unlock_page(page);
                }
                offset[1] = 0;
                offset[0]++;
                nofs += err;
        }
fail:
        f2fs_put_page(page, 0);
        trace_f2fs_truncate_inode_blocks_exit(inode, err);
        return err > 0 ? 0 : err;
}

/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
        nid_t ino = inode->i_ino;
        struct dnode_of_data dn;

        page = get_node_page(sbi, ino);
        if (IS_ERR(page))
                return PTR_ERR(page);

        if (F2FS_I(inode)->i_xattr_nid) {
                nid_t nid = F2FS_I(inode)->i_xattr_nid;
                struct page *npage = get_node_page(sbi, nid);

                if (IS_ERR(npage))
                        return PTR_ERR(npage);

                F2FS_I(inode)->i_xattr_nid = 0;
                set_new_dnode(&dn, inode, page, npage, nid);
                dn.inode_page_locked = 1;
                truncate_node(&dn);
        }

        /* 0 is possible, after f2fs_new_inode() fails */
        BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
        set_new_dnode(&dn, inode, page, page, ino);
        truncate_node(&dn);
        return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
        struct page *page;
        struct dnode_of_data dn;

        /* allocate inode page for new inode */
        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
        page = new_node_page(&dn, 0);
        if (IS_ERR(page))
                return PTR_ERR(page);
        init_dent_inode(name, page);
        f2fs_put_page(page, 1);
        return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct address_space *mapping = sbi->node_inode->i_mapping;
        struct node_info old_ni, new_ni;
        struct page *page;
        int err;

        if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
                return ERR_PTR(-EPERM);

        page = grab_cache_page(mapping, dn->nid);
        if (!page)
                return ERR_PTR(-ENOMEM);

        get_node_info(sbi, dn->nid, &old_ni);

        SetPageUptodate(page);
        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

        /* Reinitialize old_ni with new node page */
        BUG_ON(old_ni.blk_addr != NULL_ADDR);
        new_ni = old_ni;
        new_ni.ino = dn->inode->i_ino;

        if (!inc_valid_node_count(sbi, dn->inode, 1)) {
                err = -ENOSPC;
                goto fail;
        }
        set_node_addr(sbi, &new_ni, NEW_ADDR);
        set_cold_node(dn->inode, page);

        dn->node_page = page;
        sync_inode_page(dn);
        set_page_dirty(page);
        if (ofs == 0)
                inc_valid_inode_count(sbi);

        return page;

fail:
        clear_node_page_dirty(page);
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

/*
 * The caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int type)
{
        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
        struct node_info ni;

        get_node_info(sbi, page->index, &ni);

        if (ni.blk_addr == NULL_ADDR) {
                f2fs_put_page(page, 1);
                return -ENOENT;
        }

        if (PageUptodate(page))
                return LOCKED_PAGE;

        return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct address_space *mapping = sbi->node_inode->i_mapping;
        struct page *apage;
        int err;

        apage = find_get_page(mapping, nid);
        if (apage && PageUptodate(apage)) {
                f2fs_put_page(apage, 0);
                return;
        }
        f2fs_put_page(apage, 0);

        apage = grab_cache_page(mapping, nid);
        if (!apage)
                return;

        err = read_node_page(apage, READA);
        if (err == 0)
                f2fs_put_page(apage, 0);
        else if (err == LOCKED_PAGE)
                f2fs_put_page(apage, 1);
        return;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
        struct address_space *mapping = sbi->node_inode->i_mapping;
        struct page *page;
        int err;
repeat:
        page = grab_cache_page(mapping, nid);
        if (!page)
                return ERR_PTR(-ENOMEM);

        err = read_node_page(page, READ_SYNC);
        if (err < 0)
                return ERR_PTR(err);
        else if (err == LOCKED_PAGE)
                goto got_it;

        lock_page(page);
        if (!PageUptodate(page)) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        if (page->mapping != mapping) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
got_it:
        BUG_ON(nid != nid_of_node(page));
        mark_page_accessed(page);
        return page;
}

/*
 * Return a locked page for the desired node page, and readahead up to
 * MAX_RA_NODE sibling node pages around it.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
        struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
        struct address_space *mapping = sbi->node_inode->i_mapping;
        struct blk_plug plug;
        struct page *page;
        int err, i, end;
        nid_t nid;

        /* First, try getting the desired direct node. */
        nid = get_nid(parent, start, false);
        if (!nid)
                return ERR_PTR(-ENOENT);
repeat:
        page = grab_cache_page(mapping, nid);
        if (!page)
                return ERR_PTR(-ENOMEM);

        err = read_node_page(page, READ_SYNC);
        if (err < 0)
                return ERR_PTR(err);
        else if (err == LOCKED_PAGE)
                goto page_hit;

        blk_start_plug(&plug);

        /* Then, try readahead for siblings of the desired node */
        end = start + MAX_RA_NODE;
        end = min(end, NIDS_PER_BLOCK);
        for (i = start + 1; i < end; i++) {
                nid = get_nid(parent, i, false);
                if (!nid)
                        continue;
                ra_node_page(sbi, nid);
        }

        blk_finish_plug(&plug);

        lock_page(page);
        if (page->mapping != mapping) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
page_hit:
        if (!PageUptodate(page)) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        mark_page_accessed(page);
        return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
        if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
                update_inode(dn->inode, dn->node_page);
        } else if (dn->inode_page) {
                if (!dn->inode_page_locked)
                        lock_page(dn->inode_page);
                update_inode(dn->inode, dn->inode_page);
                if (!dn->inode_page_locked)
                        unlock_page(dn->inode_page);
        } else {
                update_inode_page(dn->inode);
        }
}

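/*
 * sync_node_pages() flushes dirty node pages in three steps: indirect
 * nodes first, then dentry dnodes, then file dnodes.  When @ino is
 * nonzero (the fsync path), step starts at 2 and only that inode's
 * dnodes are written, with the fsync/dentry marks set on them.
 */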
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                                        struct writeback_control *wbc)
{
        struct address_space *mapping = sbi->node_inode->i_mapping;
        pgoff_t index, end;
        struct pagevec pvec;
        int step = ino ? 2 : 0;
        int nwritten = 0, wrote = 0;

        pagevec_init(&pvec, 0);

next_step:
        index = 0;
        end = LONG_MAX;

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * flushing sequence with step:
                         * 0. indirect nodes
                         * 1. dentry dnodes
                         * 2. file dnodes
                         */
                        if (step == 0 && IS_DNODE(page))
                                continue;
                        if (step == 1 && (!IS_DNODE(page) ||
                                                is_cold_node(page)))
                                continue;
                        if (step == 2 && (!IS_DNODE(page) ||
                                                !is_cold_node(page)))
                                continue;

                        /*
                         * In fsync mode, we should not skip writing
                         * node pages.
                         */
                        if (ino && ino_of_node(page) == ino)
                                lock_page(page);
                        else if (!trylock_page(page))
                                continue;

                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }
                        if (ino && ino_of_node(page) != ino)
                                goto continue_unlock;

                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        /* called by fsync() */
                        if (ino && IS_DNODE(page)) {
                                int mark = !is_checkpointed_node(sbi, ino);
                                set_fsync_mark(page, 1);
                                if (IS_INODE(page))
                                        set_dentry_mark(page, mark);
                                nwritten++;
                        } else {
                                set_fsync_mark(page, 0);
                                set_dentry_mark(page, 0);
                        }
                        mapping->a_ops->writepage(page, wbc);
                        wrote++;

                        if (--wbc->nr_to_write == 0)
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();

                if (wbc->nr_to_write == 0) {
                        step = 2;
                        break;
                }
        }

        if (step < 2) {
                step++;
                goto next_step;
        }

        if (wrote)
                f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

        return nwritten;
}

static int f2fs_write_node_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
        nid_t nid;
        block_t new_addr;
        struct node_info ni;

        wait_on_page_writeback(page);

        /* get old block addr of this node page */
        nid = nid_of_node(page);
        BUG_ON(page->index != nid);

        get_node_info(sbi, nid, &ni);

        /* This page is already truncated */
        if (ni.blk_addr == NULL_ADDR) {
                dec_page_count(sbi, F2FS_DIRTY_NODES);
                unlock_page(page);
                return 0;
        }

        if (wbc->for_reclaim) {
                dec_page_count(sbi, F2FS_DIRTY_NODES);
                wbc->pages_skipped++;
                set_page_dirty(page);
                return AOP_WRITEPAGE_ACTIVATE;
        }

        mutex_lock(&sbi->node_write);
        set_page_writeback(page);
        write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
        set_node_addr(sbi, &ni, new_addr);
        dec_page_count(sbi, F2FS_DIRTY_NODES);
        mutex_unlock(&sbi->node_write);
        unlock_page(page);
        return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), one segment's worth, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES     512
static int f2fs_write_node_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
        long nr_to_write = wbc->nr_to_write;

        /* First check balancing cached NAT entries */
        if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
                f2fs_sync_fs(sbi->sb, true);
                return 0;
        }

        /* collect a number of dirty node pages and write together */
        if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
                return 0;

        /* if mounting failed, skip writing node pages */
        wbc->nr_to_write = max_hw_blocks(sbi);
        sync_node_pages(sbi, 0, wbc);
        wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
        return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(sbi, F2FS_DIRTY_NODES);
                SetPagePrivate(page);
                return 1;
        }
        return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        if (PageDirty(page))
                dec_page_count(sbi, F2FS_DIRTY_NODES);
        ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
        ClearPagePrivate(page);
        return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
        .writepage      = f2fs_write_node_page,
        .writepages     = f2fs_write_node_pages,
        .set_page_dirty = f2fs_set_node_page_dirty,
        .invalidatepage = f2fs_invalidate_node_page,
        .releasepage    = f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
        struct list_head *this;
        struct free_nid *i;
        list_for_each(this, head) {
                i = list_entry(this, struct free_nid, list);
                if (i->nid == n)
                        return i;
        }
        return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
        list_del(&i->list);
        kmem_cache_free(free_nid_slab, i);
}

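/*
 * add_free_nid() returns 1 if @nid was added to the free list, 0 if it was
 * skipped (nid 0, already allocated, or already on the list), and -1 if
 * the list already holds more than 2 * MAX_FREE_NIDS entries, which tells
 * the scanner to stop.
 */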
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
        struct free_nid *i;
        struct nat_entry *ne;
        bool allocated = false;

        if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
                return -1;

        /* 0 nid should not be used */
        if (nid == 0)
                return 0;

        if (!build)
                goto retry;

        /* do not add allocated nids */
        read_lock(&nm_i->nat_tree_lock);
        ne = __lookup_nat_cache(nm_i, nid);
        if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
                allocated = true;
        read_unlock(&nm_i->nat_tree_lock);
        if (allocated)
                return 0;
retry:
        i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
        if (!i) {
                cond_resched();
                goto retry;
        }
        i->nid = nid;
        i->state = NID_NEW;

        spin_lock(&nm_i->free_nid_list_lock);
        if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
                spin_unlock(&nm_i->free_nid_list_lock);
                kmem_cache_free(free_nid_slab, i);
                return 0;
        }
        list_add_tail(&i->list, &nm_i->free_nid_list);
        nm_i->fcnt++;
        spin_unlock(&nm_i->free_nid_list_lock);
        return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
        struct free_nid *i;
        spin_lock(&nm_i->free_nid_list_lock);
        i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
        if (i && i->state == NID_NEW) {
                __del_from_free_nid_list(i);
                nm_i->fcnt--;
        }
        spin_unlock(&nm_i->free_nid_list_lock);
}

static void scan_nat_page(struct f2fs_nm_info *nm_i,
                        struct page *nat_page, nid_t start_nid)
{
        struct f2fs_nat_block *nat_blk = page_address(nat_page);
        block_t blk_addr;
        int i;

        i = start_nid % NAT_ENTRY_PER_BLOCK;

        for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

                if (start_nid >= nm_i->max_nid)
                        break;

                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
                BUG_ON(blk_addr == NEW_ADDR);
                if (blk_addr == NULL_ADDR) {
                        if (add_free_nid(nm_i, start_nid, true) < 0)
                                break;
                }
        }
}

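/*
 * build_free_nids() scans up to FREE_NID_PAGES NAT blocks starting at
 * nm_i->next_scan_nid, then harvests the NAT journal in the current
 * hot-data summary: nids whose block address is NULL_ADDR are added to
 * the free list, and the rest are removed from it.
 */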
static void build_free_nids(struct f2fs_sb_info *sbi)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_summary_block *sum = curseg->sum_blk;
        int i = 0;
        nid_t nid = nm_i->next_scan_nid;

        /* Enough entries */
        if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
                return;

        /* readahead nat pages to be scanned */
        ra_nat_pages(sbi, nid);

        while (1) {
                struct page *page = get_current_nat_page(sbi, nid);

                scan_nat_page(nm_i, page, nid);
                f2fs_put_page(page, 1);

                nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
                if (nid >= nm_i->max_nid)
                        nid = 0;

                if (i++ == FREE_NID_PAGES)
                        break;
        }

        /* go to the next nat page in order to find more free nids next time */
        nm_i->next_scan_nid = nid;

        /* find free nids from the current sum_page */
        mutex_lock(&curseg->curseg_mutex);
        for (i = 0; i < nats_in_cursum(sum); i++) {
                block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
                nid = le32_to_cpu(nid_in_journal(sum, i));
                if (addr == NULL_ADDR)
                        add_free_nid(nm_i, nid, true);
                else
                        remove_free_nid(nm_i, nid);
        }
        mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i = NULL;
        struct list_head *this;
retry:
        if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
                return false;

        spin_lock(&nm_i->free_nid_list_lock);

        /* We should not use stale free nids created by build_free_nids */
        if (nm_i->fcnt && !sbi->on_build_free_nids) {
                BUG_ON(list_empty(&nm_i->free_nid_list));
                list_for_each(this, &nm_i->free_nid_list) {
                        i = list_entry(this, struct free_nid, list);
                        if (i->state == NID_NEW)
                                break;
                }

                BUG_ON(i->state != NID_NEW);
                *nid = i->nid;
                i->state = NID_ALLOC;
                nm_i->fcnt--;
                spin_unlock(&nm_i->free_nid_list_lock);
                return true;
        }
        spin_unlock(&nm_i->free_nid_list_lock);

        /* Let's scan nat pages and its caches to get free nids */
        mutex_lock(&nm_i->build_lock);
        sbi->on_build_free_nids = 1;
        build_free_nids(sbi);
        sbi->on_build_free_nids = 0;
        mutex_unlock(&nm_i->build_lock);
        goto retry;
}
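
/*
 * A rough sketch of the allocation protocol, as used by
 * get_dnode_of_data() above:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	// nid goes back on the list
 *	else
 *		alloc_nid_done(sbi, nid);	// nid is consumed for good
 */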

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;

        spin_lock(&nm_i->free_nid_list_lock);
        i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
        BUG_ON(!i || i->state != NID_ALLOC);
        __del_from_free_nid_list(i);
        spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;

        spin_lock(&nm_i->free_nid_list_lock);
        i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
        BUG_ON(!i || i->state != NID_ALLOC);
        if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
                __del_from_free_nid_list(i);
        } else {
                i->state = NID_NEW;
                nm_i->fcnt++;
        }
        spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
                struct f2fs_summary *sum, struct node_info *ni,
                block_t new_blkaddr)
{
        rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
        set_node_addr(sbi, ni, new_blkaddr);
        clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
        struct address_space *mapping = sbi->node_inode->i_mapping;
        struct f2fs_node *src, *dst;
        nid_t ino = ino_of_node(page);
        struct node_info old_ni, new_ni;
        struct page *ipage;

        ipage = grab_cache_page(mapping, ino);
        if (!ipage)
                return -ENOMEM;

        /* Should not use this inode from the free nid list */
        remove_free_nid(NM_I(sbi), ino);

        get_node_info(sbi, ino, &old_ni);
        SetPageUptodate(ipage);
        fill_node_footer(ipage, ino, ino, 0, true);

        src = (struct f2fs_node *)page_address(page);
        dst = (struct f2fs_node *)page_address(ipage);

        memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
        dst->i.i_size = 0;
        dst->i.i_blocks = cpu_to_le64(1);
        dst->i.i_links = cpu_to_le32(1);
        dst->i.i_xattr_nid = 0;

        new_ni = old_ni;
        new_ni.ino = ino;

        set_node_addr(sbi, &new_ni, NEW_ADDR);
        inc_valid_inode_count(sbi);

        f2fs_put_page(ipage, 1);
        return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct f2fs_summary_block *sum)
{
        struct f2fs_node *rn;
        struct f2fs_summary *sum_entry;
        struct page *page;
        block_t addr;
        int i, last_offset;

        /* allocate a temporary page for reading node blocks */
        page = alloc_page(GFP_NOFS | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        lock_page(page);

        /* scan the node segment */
        last_offset = sbi->blocks_per_seg;
        addr = START_BLOCK(sbi, segno);
        sum_entry = &sum->entries[0];

        for (i = 0; i < last_offset; i++, sum_entry++) {
                /*
                 * In order to read the next node page,
                 * we must clear the PageUptodate flag.
                 */
                ClearPageUptodate(page);

                if (f2fs_readpage(sbi, page, addr, READ_SYNC))
                        goto out;

                lock_page(page);
                rn = (struct f2fs_node *)page_address(page);
                sum_entry->nid = rn->footer.nid;
                sum_entry->version = 0;
                sum_entry->ofs_in_node = 0;
                addr++;
        }
        unlock_page(page);
out:
        __free_pages(page, 0);
        return 0;
}

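/*
 * flush_nats_in_journal() is the overflow path: once the NAT journal in
 * the hot-data summary block is full, its entries are drained into the
 * dirty nat cache, so that flush_nat_entries() below writes them to the
 * NAT blocks instead of back into the journal.
 */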
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_summary_block *sum = curseg->sum_blk;
        int i;

        mutex_lock(&curseg->curseg_mutex);

        if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
                mutex_unlock(&curseg->curseg_mutex);
                return false;
        }

        for (i = 0; i < nats_in_cursum(sum); i++) {
                struct nat_entry *ne;
                struct f2fs_nat_entry raw_ne;
                nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

                raw_ne = nat_in_journal(sum, i);
retry:
                write_lock(&nm_i->nat_tree_lock);
                ne = __lookup_nat_cache(nm_i, nid);
                if (ne) {
                        __set_nat_cache_dirty(nm_i, ne);
                        write_unlock(&nm_i->nat_tree_lock);
                        continue;
                }
                ne = grab_nat_entry(nm_i, nid);
                if (!ne) {
                        write_unlock(&nm_i->nat_tree_lock);
                        goto retry;
                }
                nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
                nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
                nat_set_version(ne, raw_ne.version);
                __set_nat_cache_dirty(nm_i, ne);
                write_unlock(&nm_i->nat_tree_lock);
        }
        update_nats_in_cursum(sum, -i);
        mutex_unlock(&curseg->curseg_mutex);
        return true;
}
1588
/*
 * This function is called during the checkpointing process to write every
 * dirty NAT entry back, either into the NAT journal kept in the current
 * summary block or into the NAT area itself.
 */
1592void flush_nat_entries(struct f2fs_sb_info *sbi)
1593{
1594        struct f2fs_nm_info *nm_i = NM_I(sbi);
1595        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1596        struct f2fs_summary_block *sum = curseg->sum_blk;
1597        struct list_head *cur, *n;
1598        struct page *page = NULL;
1599        struct f2fs_nat_block *nat_blk = NULL;
1600        nid_t start_nid = 0, end_nid = 0;
1601        bool flushed;
1602
1603        flushed = flush_nats_in_journal(sbi);
1604
1605        if (!flushed)
1606                mutex_lock(&curseg->curseg_mutex);
1607
1608        /* 1) flush dirty nat caches */
1609        list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1610                struct nat_entry *ne;
1611                nid_t nid;
1612                struct f2fs_nat_entry raw_ne;
1613                int offset = -1;
1614                block_t new_blkaddr;
1615
1616                ne = list_entry(cur, struct nat_entry, list);
1617                nid = nat_get_nid(ne);
1618
1619                if (nat_get_blkaddr(ne) == NEW_ADDR)
1620                        continue;
1621                if (flushed)
1622                        goto to_nat_page;
1623
                /* if there is room for nat entries in the current summary block */
1625                offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1626                if (offset >= 0) {
1627                        raw_ne = nat_in_journal(sum, offset);
1628                        goto flush_now;
1629                }
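                /*
                 * Otherwise write the entry through the NAT block itself.
                 * The current NAT page is cached across loop iterations and
                 * only replaced when nid falls outside its
                 * [start_nid, end_nid] window.
                 */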
1630to_nat_page:
                if (!page || nid < start_nid || nid > end_nid) {
1632                        if (page) {
1633                                f2fs_put_page(page, 1);
1634                                page = NULL;
1635                        }
1636                        start_nid = START_NID(nid);
1637                        end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1638
                        /*
                         * get the nat block page with its dirty flag set,
                         * reference count elevated, mapped and locked
                         */
1643                        page = get_next_nat_page(sbi, start_nid);
1644                        nat_blk = page_address(page);
1645                }
1646
1647                BUG_ON(!nat_blk);
1648                raw_ne = nat_blk->entries[nid - start_nid];
1649flush_now:
1650                new_blkaddr = nat_get_blkaddr(ne);
1651
1652                raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1653                raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1654                raw_ne.version = nat_get_version(ne);
1655
1656                if (offset < 0) {
1657                        nat_blk->entries[nid - start_nid] = raw_ne;
1658                } else {
1659                        nat_in_journal(sum, offset) = raw_ne;
1660                        nid_in_journal(sum, offset) = cpu_to_le32(nid);
1661                }
1662
1663                if (nat_get_blkaddr(ne) == NULL_ADDR &&
1664                                add_free_nid(NM_I(sbi), nid, false) <= 0) {
1665                        write_lock(&nm_i->nat_tree_lock);
1666                        __del_from_nat_cache(nm_i, ne);
1667                        write_unlock(&nm_i->nat_tree_lock);
1668                } else {
1669                        write_lock(&nm_i->nat_tree_lock);
1670                        __clear_nat_cache_dirty(nm_i, ne);
1671                        ne->checkpointed = true;
1672                        write_unlock(&nm_i->nat_tree_lock);
1673                }
1674        }
1675        if (!flushed)
1676                mutex_unlock(&curseg->curseg_mutex);
1677        f2fs_put_page(page, 1);
1678
1679        /* 2) shrink nat caches if necessary */
1680        try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1681}
1682
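/*
 * init_node_manager - set up the in-memory node manager state.
 *
 * Derives max_nid from the on-disk NAT geometry, initializes the free nid
 * list, the nat cache radix tree and their locks, and takes a private copy
 * of the NAT version bitmap from the checkpoint.
 */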
1683static int init_node_manager(struct f2fs_sb_info *sbi)
1684{
1685        struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1686        struct f2fs_nm_info *nm_i = NM_I(sbi);
1687        unsigned char *version_bitmap;
1688        unsigned int nat_segs, nat_blocks;
1689
1690        nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1691
        /* segment_count_nat includes pair segments, so divide it by 2 */
1693        nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1694        nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
1695        nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
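        /*
         * Worked example (hypothetical geometry): with 4KB blocks a NAT
         * block holds 455 entries, so a single NAT segment pair with 512
         * blocks per segment gives max_nid = 455 * 512 = 232,960 node ids.
         */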
1696        nm_i->fcnt = 0;
1697        nm_i->nat_cnt = 0;
1698
1699        INIT_LIST_HEAD(&nm_i->free_nid_list);
1700        INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1701        INIT_LIST_HEAD(&nm_i->nat_entries);
1702        INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1703
1704        mutex_init(&nm_i->build_lock);
1705        spin_lock_init(&nm_i->free_nid_list_lock);
1706        rwlock_init(&nm_i->nat_tree_lock);
1707
1708        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1709        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1710        version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1711        if (!version_bitmap)
1712                return -EFAULT;
1713
1714        nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1715                                        GFP_KERNEL);
1716        if (!nm_i->nat_bitmap)
1717                return -ENOMEM;
1718        return 0;
1719}
1720
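/*
 * build_node_manager - allocate the node manager and pre-scan free nids.
 *
 * Usage sketch (the mount-path wiring is an assumption, not shown here):
 *
 *	err = build_node_manager(sbi);
 *	if (err)
 *		goto free_nm;	(error path calls destroy_node_manager())
 */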
1721int build_node_manager(struct f2fs_sb_info *sbi)
1722{
1723        int err;
1724
1725        sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1726        if (!sbi->nm_info)
1727                return -ENOMEM;
1728
1729        err = init_node_manager(sbi);
1730        if (err)
1731                return err;
1732
1733        build_free_nids(sbi);
1734        return 0;
1735}
1736
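/*
 * destroy_node_manager - tear down everything build_node_manager set up.
 *
 * The free nid list must contain no allocated nids at this point; the nat
 * cache is emptied with gang lookups before the bitmap copy and the
 * nm_info structure itself are freed.
 */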
1737void destroy_node_manager(struct f2fs_sb_info *sbi)
1738{
1739        struct f2fs_nm_info *nm_i = NM_I(sbi);
1740        struct free_nid *i, *next_i;
1741        struct nat_entry *natvec[NATVEC_SIZE];
1742        nid_t nid = 0;
1743        unsigned int found;
1744
1745        if (!nm_i)
1746                return;
1747
1748        /* destroy free nid list */
1749        spin_lock(&nm_i->free_nid_list_lock);
1750        list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1751                BUG_ON(i->state == NID_ALLOC);
1752                __del_from_free_nid_list(i);
1753                nm_i->fcnt--;
1754        }
1755        BUG_ON(nm_i->fcnt);
1756        spin_unlock(&nm_i->free_nid_list_lock);
1757
1758        /* destroy nat cache */
1759        write_lock(&nm_i->nat_tree_lock);
1760        while ((found = __gang_lookup_nat_cache(nm_i,
1761                                        nid, NATVEC_SIZE, natvec))) {
1762                unsigned idx;
1763                for (idx = 0; idx < found; idx++) {
1764                        struct nat_entry *e = natvec[idx];
1765                        nid = nat_get_nid(e) + 1;
1766                        __del_from_nat_cache(nm_i, e);
1767                }
1768        }
1769        BUG_ON(nm_i->nat_cnt);
1770        write_unlock(&nm_i->nat_tree_lock);
1771
1772        kfree(nm_i->nat_bitmap);
1773        sbi->nm_info = NULL;
1774        kfree(nm_i);
1775}
1776
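/*
 * Slab caches for the two small, frequently allocated objects in this
 * file; created once at module init and destroyed in reverse order on
 * exit.
 */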
1777int __init create_node_manager_caches(void)
1778{
1779        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1780                        sizeof(struct nat_entry), NULL);
1781        if (!nat_entry_slab)
1782                return -ENOMEM;
1783
1784        free_nid_slab = f2fs_kmem_cache_create("free_nid",
1785                        sizeof(struct free_nid), NULL);
1786        if (!free_nid_slab) {
1787                kmem_cache_destroy(nat_entry_slab);
1788                return -ENOMEM;
1789        }
1790        return 0;
1791}
1792
1793void destroy_node_manager_caches(void)
1794{
1795        kmem_cache_destroy(free_nid_slab);
1796        kmem_cache_destroy(nat_entry_slab);
1797}
1798