linux/fs/f2fs/node.c
   1/*
   2 * fs/f2fs/node.c
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#include <linux/fs.h>
  12#include <linux/f2fs_fs.h>
  13#include <linux/mpage.h>
  14#include <linux/backing-dev.h>
  15#include <linux/blkdev.h>
  16#include <linux/pagevec.h>
  17#include <linux/swap.h>
  18
  19#include "f2fs.h"
  20#include "node.h"
  21#include "segment.h"
  22#include <trace/events/f2fs.h>
  23
  24static struct kmem_cache *nat_entry_slab;
  25static struct kmem_cache *free_nid_slab;
  26
  27static void clear_node_page_dirty(struct page *page)
  28{
  29        struct address_space *mapping = page->mapping;
  30        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
   31        unsigned long flags;
  32
  33        if (PageDirty(page)) {
  34                spin_lock_irqsave(&mapping->tree_lock, flags);
  35                radix_tree_tag_clear(&mapping->page_tree,
  36                                page_index(page),
  37                                PAGECACHE_TAG_DIRTY);
  38                spin_unlock_irqrestore(&mapping->tree_lock, flags);
  39
  40                clear_page_dirty_for_io(page);
  41                dec_page_count(sbi, F2FS_DIRTY_NODES);
  42        }
  43        ClearPageUptodate(page);
  44}
  45
  46static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
  47{
  48        pgoff_t index = current_nat_addr(sbi, nid);
  49        return get_meta_page(sbi, index);
  50}
  51
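     /*
      * Get the page for the alternate ("next") NAT block covering @nid.
      * If the current NAT block page is not dirty yet, copy its contents to
      * the next location and record via set_to_next_nat() that the entry for
      * @nid now lives in the next block; a dirty current page is returned
      * as-is, since it is already the new target NAT page.
      */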
  52static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
  53{
  54        struct page *src_page;
  55        struct page *dst_page;
  56        pgoff_t src_off;
  57        pgoff_t dst_off;
  58        void *src_addr;
  59        void *dst_addr;
  60        struct f2fs_nm_info *nm_i = NM_I(sbi);
  61
  62        src_off = current_nat_addr(sbi, nid);
  63        dst_off = next_nat_addr(sbi, src_off);
  64
  65        /* get current nat block page with lock */
  66        src_page = get_meta_page(sbi, src_off);
  67
  68        /* Dirty src_page means that it is already the new target NAT page. */
  69        if (PageDirty(src_page))
  70                return src_page;
  71
  72        dst_page = grab_meta_page(sbi, dst_off);
  73
  74        src_addr = page_address(src_page);
  75        dst_addr = page_address(dst_page);
  76        memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
  77        set_page_dirty(dst_page);
  78        f2fs_put_page(src_page, 1);
  79
  80        set_to_next_nat(nm_i, nid);
  81
  82        return dst_page;
  83}
  84
  85/*
  86 * Readahead NAT pages
  87 */
  88static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
  89{
  90        struct address_space *mapping = sbi->meta_inode->i_mapping;
  91        struct f2fs_nm_info *nm_i = NM_I(sbi);
  92        struct blk_plug plug;
  93        struct page *page;
  94        pgoff_t index;
  95        int i;
  96
  97        blk_start_plug(&plug);
  98
  99        for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
 100                if (nid >= nm_i->max_nid)
 101                        nid = 0;
 102                index = current_nat_addr(sbi, nid);
 103
 104                page = grab_cache_page(mapping, index);
 105                if (!page)
 106                        continue;
 107                if (PageUptodate(page)) {
 108                        f2fs_put_page(page, 1);
 109                        continue;
 110                }
 111                if (f2fs_readpage(sbi, page, index, READ))
 112                        continue;
 113
 114                f2fs_put_page(page, 0);
 115        }
 116        blk_finish_plug(&plug);
 117}
 118
 119static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 120{
 121        return radix_tree_lookup(&nm_i->nat_root, n);
 122}
 123
 124static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
 125                nid_t start, unsigned int nr, struct nat_entry **ep)
 126{
 127        return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
 128}
 129
 130static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 131{
 132        list_del(&e->list);
 133        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
 134        nm_i->nat_cnt--;
 135        kmem_cache_free(nat_entry_slab, e);
 136}
 137
 138int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 139{
 140        struct f2fs_nm_info *nm_i = NM_I(sbi);
 141        struct nat_entry *e;
 142        int is_cp = 1;
 143
 144        read_lock(&nm_i->nat_tree_lock);
 145        e = __lookup_nat_cache(nm_i, nid);
 146        if (e && !e->checkpointed)
 147                is_cp = 0;
 148        read_unlock(&nm_i->nat_tree_lock);
 149        return is_cp;
 150}
 151
 152static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 153{
 154        struct nat_entry *new;
 155
 156        new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
 157        if (!new)
 158                return NULL;
 159        if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
 160                kmem_cache_free(nat_entry_slab, new);
 161                return NULL;
 162        }
 163        memset(new, 0, sizeof(struct nat_entry));
 164        nat_set_nid(new, nid);
 165        list_add_tail(&new->list, &nm_i->nat_entries);
 166        nm_i->nat_cnt++;
 167        return new;
 168}
 169
 170static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
 171                                                struct f2fs_nat_entry *ne)
 172{
 173        struct nat_entry *e;
 174retry:
 175        write_lock(&nm_i->nat_tree_lock);
 176        e = __lookup_nat_cache(nm_i, nid);
 177        if (!e) {
 178                e = grab_nat_entry(nm_i, nid);
 179                if (!e) {
 180                        write_unlock(&nm_i->nat_tree_lock);
 181                        goto retry;
 182                }
 183                nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
 184                nat_set_ino(e, le32_to_cpu(ne->ino));
 185                nat_set_version(e, ne->version);
 186                e->checkpointed = true;
 187        }
 188        write_unlock(&nm_i->nat_tree_lock);
 189}
 190
 191static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 192                        block_t new_blkaddr)
 193{
 194        struct f2fs_nm_info *nm_i = NM_I(sbi);
 195        struct nat_entry *e;
 196retry:
 197        write_lock(&nm_i->nat_tree_lock);
 198        e = __lookup_nat_cache(nm_i, ni->nid);
 199        if (!e) {
 200                e = grab_nat_entry(nm_i, ni->nid);
 201                if (!e) {
 202                        write_unlock(&nm_i->nat_tree_lock);
 203                        goto retry;
 204                }
 205                e->ni = *ni;
 206                e->checkpointed = true;
 207                BUG_ON(ni->blk_addr == NEW_ADDR);
 208        } else if (new_blkaddr == NEW_ADDR) {
 209                /*
  210                 * when a nid is reallocated, the previous nat entry
  211                 * may remain in the nat cache, so reinitialize it
  212                 * with the new information.
 213                 */
 214                e->ni = *ni;
 215                BUG_ON(ni->blk_addr != NULL_ADDR);
 216        }
 217
 218        if (new_blkaddr == NEW_ADDR)
 219                e->checkpointed = false;
 220
 221        /* sanity check */
 222        BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
 223        BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
 224                        new_blkaddr == NULL_ADDR);
 225        BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
 226                        new_blkaddr == NEW_ADDR);
 227        BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
 228                        nat_get_blkaddr(e) != NULL_ADDR &&
 229                        new_blkaddr == NEW_ADDR);
 230
  231        /* increment the version number as the node is removed */
 232        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 233                unsigned char version = nat_get_version(e);
 234                nat_set_version(e, inc_node_version(version));
 235        }
 236
 237        /* change address */
 238        nat_set_blkaddr(e, new_blkaddr);
 239        __set_nat_cache_dirty(nm_i, e);
 240        write_unlock(&nm_i->nat_tree_lock);
 241}
 242
 243static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 244{
 245        struct f2fs_nm_info *nm_i = NM_I(sbi);
 246
 247        if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
 248                return 0;
 249
 250        write_lock(&nm_i->nat_tree_lock);
 251        while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 252                struct nat_entry *ne;
 253                ne = list_first_entry(&nm_i->nat_entries,
 254                                        struct nat_entry, list);
 255                __del_from_nat_cache(nm_i, ne);
 256                nr_shrink--;
 257        }
 258        write_unlock(&nm_i->nat_tree_lock);
 259        return nr_shrink;
 260}
 261
 262/*
  263 * This function always returns success.
 264 */
 265void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 266{
 267        struct f2fs_nm_info *nm_i = NM_I(sbi);
 268        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 269        struct f2fs_summary_block *sum = curseg->sum_blk;
 270        nid_t start_nid = START_NID(nid);
 271        struct f2fs_nat_block *nat_blk;
 272        struct page *page = NULL;
 273        struct f2fs_nat_entry ne;
 274        struct nat_entry *e;
 275        int i;
 276
 277        memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 278        ni->nid = nid;
 279
 280        /* Check nat cache */
 281        read_lock(&nm_i->nat_tree_lock);
 282        e = __lookup_nat_cache(nm_i, nid);
 283        if (e) {
 284                ni->ino = nat_get_ino(e);
 285                ni->blk_addr = nat_get_blkaddr(e);
 286                ni->version = nat_get_version(e);
 287        }
 288        read_unlock(&nm_i->nat_tree_lock);
 289        if (e)
 290                return;
 291
 292        /* Check current segment summary */
 293        mutex_lock(&curseg->curseg_mutex);
 294        i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
 295        if (i >= 0) {
 296                ne = nat_in_journal(sum, i);
 297                node_info_from_raw_nat(ni, &ne);
 298        }
 299        mutex_unlock(&curseg->curseg_mutex);
 300        if (i >= 0)
 301                goto cache;
 302
 303        /* Fill node_info from nat page */
 304        page = get_current_nat_page(sbi, start_nid);
 305        nat_blk = (struct f2fs_nat_block *)page_address(page);
 306        ne = nat_blk->entries[nid - start_nid];
 307        node_info_from_raw_nat(ni, &ne);
 308        f2fs_put_page(page, 1);
 309cache:
 310        /* cache nat entry */
 311        cache_nat_entry(NM_I(sbi), nid, &ne);
 312}
 313
 314/*
 315 * The maximum depth is four.
  316 * offset[0] will hold the offset within the raw inode.
 317 */
 318static int get_node_path(long block, int offset[4], unsigned int noffset[4])
 319{
 320        const long direct_index = ADDRS_PER_INODE;
 321        const long direct_blks = ADDRS_PER_BLOCK;
 322        const long dptrs_per_blk = NIDS_PER_BLOCK;
 323        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
 324        const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
 325        int n = 0;
 326        int level = 0;
 327
 328        noffset[0] = 0;
 329
 330        if (block < direct_index) {
 331                offset[n] = block;
 332                goto got;
 333        }
 334        block -= direct_index;
 335        if (block < direct_blks) {
 336                offset[n++] = NODE_DIR1_BLOCK;
 337                noffset[n] = 1;
 338                offset[n] = block;
 339                level = 1;
 340                goto got;
 341        }
 342        block -= direct_blks;
 343        if (block < direct_blks) {
 344                offset[n++] = NODE_DIR2_BLOCK;
 345                noffset[n] = 2;
 346                offset[n] = block;
 347                level = 1;
 348                goto got;
 349        }
 350        block -= direct_blks;
 351        if (block < indirect_blks) {
 352                offset[n++] = NODE_IND1_BLOCK;
 353                noffset[n] = 3;
 354                offset[n++] = block / direct_blks;
 355                noffset[n] = 4 + offset[n - 1];
 356                offset[n] = block % direct_blks;
 357                level = 2;
 358                goto got;
 359        }
 360        block -= indirect_blks;
 361        if (block < indirect_blks) {
 362                offset[n++] = NODE_IND2_BLOCK;
 363                noffset[n] = 4 + dptrs_per_blk;
 364                offset[n++] = block / direct_blks;
 365                noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
 366                offset[n] = block % direct_blks;
 367                level = 2;
 368                goto got;
 369        }
 370        block -= indirect_blks;
 371        if (block < dindirect_blks) {
 372                offset[n++] = NODE_DIND_BLOCK;
 373                noffset[n] = 5 + (dptrs_per_blk * 2);
 374                offset[n++] = block / indirect_blks;
 375                noffset[n] = 6 + (dptrs_per_blk * 2) +
 376                              offset[n - 1] * (dptrs_per_blk + 1);
 377                offset[n++] = (block / direct_blks) % dptrs_per_blk;
 378                noffset[n] = 7 + (dptrs_per_blk * 2) +
 379                              offset[n - 2] * (dptrs_per_blk + 1) +
 380                              offset[n - 1];
 381                offset[n] = block % direct_blks;
 382                level = 3;
 383                goto got;
 384        } else {
 385                BUG();
 386        }
 387got:
 388        return level;
 389}
 390
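     /*
      * A short worked example of the mapping above (a sketch that follows
      * directly from the branches of get_node_path()):
      *
      * block == ADDRS_PER_INODE, the first block past the in-inode pointers,
      * yields level = 1 with offset[0] = NODE_DIR1_BLOCK, offset[1] = 0 and
      * noffset[1] = 1.
      *
      * block == ADDRS_PER_INODE + 2 * ADDRS_PER_BLOCK, the first block past
      * both direct nodes, yields level = 2 with offset[0] = NODE_IND1_BLOCK,
      * offset[1] = 0, offset[2] = 0, noffset[1] = 3 and noffset[2] = 4.
      */
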
 391/*
 392 * Caller should call f2fs_put_dnode(dn).
  393 * Also, it should grab and release a mutex by calling mutex_lock_op() and
  394 * mutex_unlock_op() only if mode is not set to RDONLY_NODE.
  395 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 396 */
 397int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 398{
 399        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 400        struct page *npage[4];
 401        struct page *parent;
 402        int offset[4];
 403        unsigned int noffset[4];
 404        nid_t nids[4];
 405        int level, i;
 406        int err = 0;
 407
 408        level = get_node_path(index, offset, noffset);
 409
 410        nids[0] = dn->inode->i_ino;
 411        npage[0] = dn->inode_page;
 412
 413        if (!npage[0]) {
 414                npage[0] = get_node_page(sbi, nids[0]);
 415                if (IS_ERR(npage[0]))
 416                        return PTR_ERR(npage[0]);
 417        }
 418        parent = npage[0];
 419        if (level != 0)
 420                nids[1] = get_nid(parent, offset[0], true);
 421        dn->inode_page = npage[0];
 422        dn->inode_page_locked = true;
 423
 424        /* get indirect or direct nodes */
 425        for (i = 1; i <= level; i++) {
 426                bool done = false;
 427
 428                if (!nids[i] && mode == ALLOC_NODE) {
 429                        /* alloc new node */
 430                        if (!alloc_nid(sbi, &(nids[i]))) {
 431                                err = -ENOSPC;
 432                                goto release_pages;
 433                        }
 434
 435                        dn->nid = nids[i];
 436                        npage[i] = new_node_page(dn, noffset[i], NULL);
 437                        if (IS_ERR(npage[i])) {
 438                                alloc_nid_failed(sbi, nids[i]);
 439                                err = PTR_ERR(npage[i]);
 440                                goto release_pages;
 441                        }
 442
 443                        set_nid(parent, offset[i - 1], nids[i], i == 1);
 444                        alloc_nid_done(sbi, nids[i]);
 445                        done = true;
 446                } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
 447                        npage[i] = get_node_page_ra(parent, offset[i - 1]);
 448                        if (IS_ERR(npage[i])) {
 449                                err = PTR_ERR(npage[i]);
 450                                goto release_pages;
 451                        }
 452                        done = true;
 453                }
 454                if (i == 1) {
 455                        dn->inode_page_locked = false;
 456                        unlock_page(parent);
 457                } else {
 458                        f2fs_put_page(parent, 1);
 459                }
 460
 461                if (!done) {
 462                        npage[i] = get_node_page(sbi, nids[i]);
 463                        if (IS_ERR(npage[i])) {
 464                                err = PTR_ERR(npage[i]);
 465                                f2fs_put_page(npage[0], 0);
 466                                goto release_out;
 467                        }
 468                }
 469                if (i < level) {
 470                        parent = npage[i];
 471                        nids[i + 1] = get_nid(parent, offset[i], false);
 472                }
 473        }
 474        dn->nid = nids[level];
 475        dn->ofs_in_node = offset[level];
 476        dn->node_page = npage[level];
 477        dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
 478        return 0;
 479
 480release_pages:
 481        f2fs_put_page(parent, 1);
 482        if (i > 1)
 483                f2fs_put_page(npage[0], 0);
 484release_out:
 485        dn->inode_page = NULL;
 486        dn->node_page = NULL;
 487        return err;
 488}
 489
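     /*
      * Typical usage of get_dnode_of_data() (a sketch; it mirrors the way
      * callers elsewhere in f2fs drive this function, using helpers visible
      * in this file):
      *
      *   struct dnode_of_data dn;
      *
      *   set_new_dnode(&dn, inode, NULL, NULL, 0);
      *   err = get_dnode_of_data(&dn, index, ALLOC_NODE);
      *   if (err)
      *           return err;
      *
      * On success dn.node_page is locked and dn.data_blkaddr holds the block
      * address for index; the caller releases everything with
      * f2fs_put_dnode(&dn).
      */
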
 490static void truncate_node(struct dnode_of_data *dn)
 491{
 492        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 493        struct node_info ni;
 494
 495        get_node_info(sbi, dn->nid, &ni);
 496        if (dn->inode->i_blocks == 0) {
 497                BUG_ON(ni.blk_addr != NULL_ADDR);
 498                goto invalidate;
 499        }
 500        BUG_ON(ni.blk_addr == NULL_ADDR);
 501
 502        /* Deallocate node address */
 503        invalidate_blocks(sbi, ni.blk_addr);
 504        dec_valid_node_count(sbi, dn->inode, 1);
 505        set_node_addr(sbi, &ni, NULL_ADDR);
 506
 507        if (dn->nid == dn->inode->i_ino) {
 508                remove_orphan_inode(sbi, dn->nid);
 509                dec_valid_inode_count(sbi);
 510        } else {
 511                sync_inode_page(dn);
 512        }
 513invalidate:
 514        clear_node_page_dirty(dn->node_page);
 515        F2FS_SET_SB_DIRT(sbi);
 516
 517        f2fs_put_page(dn->node_page, 1);
 518        dn->node_page = NULL;
 519        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
 520}
 521
 522static int truncate_dnode(struct dnode_of_data *dn)
 523{
 524        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 525        struct page *page;
 526
 527        if (dn->nid == 0)
 528                return 1;
 529
 530        /* get direct node */
 531        page = get_node_page(sbi, dn->nid);
 532        if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
 533                return 1;
 534        else if (IS_ERR(page))
 535                return PTR_ERR(page);
 536
 537        /* Make dnode_of_data for parameter */
 538        dn->node_page = page;
 539        dn->ofs_in_node = 0;
 540        truncate_data_blocks(dn);
 541        truncate_node(dn);
 542        return 1;
 543}
 544
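     /*
      * Recursively truncate the node subtree rooted at dn->nid (an indirect
      * or double-indirect node). Returns the number of node offsets covered,
      * freed or found absent, so that the caller can advance its node offset
      * count, or a negative error code.
      */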
 545static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 546                                                int ofs, int depth)
 547{
 548        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 549        struct dnode_of_data rdn = *dn;
 550        struct page *page;
 551        struct f2fs_node *rn;
 552        nid_t child_nid;
 553        unsigned int child_nofs;
 554        int freed = 0;
 555        int i, ret;
 556
 557        if (dn->nid == 0)
 558                return NIDS_PER_BLOCK + 1;
 559
 560        trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
 561
 562        page = get_node_page(sbi, dn->nid);
 563        if (IS_ERR(page)) {
 564                trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
 565                return PTR_ERR(page);
 566        }
 567
 568        rn = (struct f2fs_node *)page_address(page);
 569        if (depth < 3) {
 570                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
 571                        child_nid = le32_to_cpu(rn->in.nid[i]);
 572                        if (child_nid == 0)
 573                                continue;
 574                        rdn.nid = child_nid;
 575                        ret = truncate_dnode(&rdn);
 576                        if (ret < 0)
 577                                goto out_err;
 578                        set_nid(page, i, 0, false);
 579                }
 580        } else {
 581                child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
 582                for (i = ofs; i < NIDS_PER_BLOCK; i++) {
 583                        child_nid = le32_to_cpu(rn->in.nid[i]);
 584                        if (child_nid == 0) {
 585                                child_nofs += NIDS_PER_BLOCK + 1;
 586                                continue;
 587                        }
 588                        rdn.nid = child_nid;
 589                        ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
 590                        if (ret == (NIDS_PER_BLOCK + 1)) {
 591                                set_nid(page, i, 0, false);
 592                                child_nofs += ret;
 593                        } else if (ret < 0 && ret != -ENOENT) {
 594                                goto out_err;
 595                        }
 596                }
 597                freed = child_nofs;
 598        }
 599
 600        if (!ofs) {
 601                /* remove current indirect node */
 602                dn->node_page = page;
 603                truncate_node(dn);
 604                freed++;
 605        } else {
 606                f2fs_put_page(page, 1);
 607        }
 608        trace_f2fs_truncate_nodes_exit(dn->inode, freed);
 609        return freed;
 610
 611out_err:
 612        f2fs_put_page(page, 1);
 613        trace_f2fs_truncate_nodes_exit(dn->inode, ret);
 614        return ret;
 615}
 616
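     /*
      * Truncate the tail of a partially covered (double-)indirect node:
      * free the direct nodes from offset[depth - 1] onwards, and the
      * indirect node itself when truncation starts at its first entry.
      */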
 617static int truncate_partial_nodes(struct dnode_of_data *dn,
 618                        struct f2fs_inode *ri, int *offset, int depth)
 619{
 620        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 621        struct page *pages[2];
 622        nid_t nid[3];
 623        nid_t child_nid;
 624        int err = 0;
 625        int i;
 626        int idx = depth - 2;
 627
 628        nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
 629        if (!nid[0])
 630                return 0;
 631
 632        /* get indirect nodes in the path */
 633        for (i = 0; i < depth - 1; i++) {
  634                /* reference count will be increased */
 635                pages[i] = get_node_page(sbi, nid[i]);
 636                if (IS_ERR(pages[i])) {
 637                        depth = i + 1;
 638                        err = PTR_ERR(pages[i]);
 639                        goto fail;
 640                }
 641                nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
 642        }
 643
 644        /* free direct nodes linked to a partial indirect node */
 645        for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
 646                child_nid = get_nid(pages[idx], i, false);
 647                if (!child_nid)
 648                        continue;
 649                dn->nid = child_nid;
 650                err = truncate_dnode(dn);
 651                if (err < 0)
 652                        goto fail;
 653                set_nid(pages[idx], i, 0, false);
 654        }
 655
 656        if (offset[depth - 1] == 0) {
 657                dn->node_page = pages[idx];
 658                dn->nid = nid[idx];
 659                truncate_node(dn);
 660        } else {
 661                f2fs_put_page(pages[idx], 1);
 662        }
 663        offset[idx]++;
 664        offset[depth - 1] = 0;
 665fail:
 666        for (i = depth - 3; i >= 0; i--)
 667                f2fs_put_page(pages[i], 1);
 668
 669        trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
 670
 671        return err;
 672}
 673
 674/*
 675 * All the block addresses of data and nodes should be nullified.
 676 */
 677int truncate_inode_blocks(struct inode *inode, pgoff_t from)
 678{
 679        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 680        struct address_space *node_mapping = sbi->node_inode->i_mapping;
 681        int err = 0, cont = 1;
 682        int level, offset[4], noffset[4];
 683        unsigned int nofs = 0;
 684        struct f2fs_node *rn;
 685        struct dnode_of_data dn;
 686        struct page *page;
 687
 688        trace_f2fs_truncate_inode_blocks_enter(inode, from);
 689
 690        level = get_node_path(from, offset, noffset);
 691restart:
 692        page = get_node_page(sbi, inode->i_ino);
 693        if (IS_ERR(page)) {
 694                trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
 695                return PTR_ERR(page);
 696        }
 697
 698        set_new_dnode(&dn, inode, page, NULL, 0);
 699        unlock_page(page);
 700
 701        rn = page_address(page);
 702        switch (level) {
 703        case 0:
 704        case 1:
 705                nofs = noffset[1];
 706                break;
 707        case 2:
 708                nofs = noffset[1];
 709                if (!offset[level - 1])
 710                        goto skip_partial;
 711                err = truncate_partial_nodes(&dn, &rn->i, offset, level);
 712                if (err < 0 && err != -ENOENT)
 713                        goto fail;
 714                nofs += 1 + NIDS_PER_BLOCK;
 715                break;
 716        case 3:
 717                nofs = 5 + 2 * NIDS_PER_BLOCK;
 718                if (!offset[level - 1])
 719                        goto skip_partial;
 720                err = truncate_partial_nodes(&dn, &rn->i, offset, level);
 721                if (err < 0 && err != -ENOENT)
 722                        goto fail;
 723                break;
 724        default:
 725                BUG();
 726        }
 727
 728skip_partial:
 729        while (cont) {
 730                dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
 731                switch (offset[0]) {
 732                case NODE_DIR1_BLOCK:
 733                case NODE_DIR2_BLOCK:
 734                        err = truncate_dnode(&dn);
 735                        break;
 736
 737                case NODE_IND1_BLOCK:
 738                case NODE_IND2_BLOCK:
 739                        err = truncate_nodes(&dn, nofs, offset[1], 2);
 740                        break;
 741
 742                case NODE_DIND_BLOCK:
 743                        err = truncate_nodes(&dn, nofs, offset[1], 3);
 744                        cont = 0;
 745                        break;
 746
 747                default:
 748                        BUG();
 749                }
 750                if (err < 0 && err != -ENOENT)
 751                        goto fail;
 752                if (offset[1] == 0 &&
 753                                rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
 754                        lock_page(page);
 755                        if (page->mapping != node_mapping) {
 756                                f2fs_put_page(page, 1);
 757                                goto restart;
 758                        }
 759                        wait_on_page_writeback(page);
 760                        rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 761                        set_page_dirty(page);
 762                        unlock_page(page);
 763                }
 764                offset[1] = 0;
 765                offset[0]++;
 766                nofs += err;
 767        }
 768fail:
 769        f2fs_put_page(page, 0);
 770        trace_f2fs_truncate_inode_blocks_exit(inode, err);
 771        return err > 0 ? 0 : err;
 772}
 773
 774/*
 775 * Caller should grab and release a mutex by calling mutex_lock_op() and
 776 * mutex_unlock_op().
 777 */
 778int remove_inode_page(struct inode *inode)
 779{
 780        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 781        struct page *page;
 782        nid_t ino = inode->i_ino;
 783        struct dnode_of_data dn;
 784
 785        page = get_node_page(sbi, ino);
 786        if (IS_ERR(page))
 787                return PTR_ERR(page);
 788
 789        if (F2FS_I(inode)->i_xattr_nid) {
 790                nid_t nid = F2FS_I(inode)->i_xattr_nid;
 791                struct page *npage = get_node_page(sbi, nid);
 792
 793                if (IS_ERR(npage))
 794                        return PTR_ERR(npage);
 795
 796                F2FS_I(inode)->i_xattr_nid = 0;
 797                set_new_dnode(&dn, inode, page, npage, nid);
 798                dn.inode_page_locked = 1;
 799                truncate_node(&dn);
 800        }
 801
  802        /* 0 is possible, e.g. after f2fs_new_inode() has failed */
 803        BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
 804        set_new_dnode(&dn, inode, page, page, ino);
 805        truncate_node(&dn);
 806        return 0;
 807}
 808
 809struct page *new_inode_page(struct inode *inode, const struct qstr *name)
 810{
 811        struct dnode_of_data dn;
 812
 813        /* allocate inode page for new inode */
 814        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
 815
  816        /* caller should call f2fs_put_page(page, 1); */
 817        return new_node_page(&dn, 0, NULL);
 818}
 819
 820struct page *new_node_page(struct dnode_of_data *dn,
 821                                unsigned int ofs, struct page *ipage)
 822{
 823        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 824        struct address_space *mapping = sbi->node_inode->i_mapping;
 825        struct node_info old_ni, new_ni;
 826        struct page *page;
 827        int err;
 828
 829        if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
 830                return ERR_PTR(-EPERM);
 831
 832        page = grab_cache_page(mapping, dn->nid);
 833        if (!page)
 834                return ERR_PTR(-ENOMEM);
 835
 836        get_node_info(sbi, dn->nid, &old_ni);
 837
 838        SetPageUptodate(page);
 839        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
 840
 841        /* Reinitialize old_ni with new node page */
 842        BUG_ON(old_ni.blk_addr != NULL_ADDR);
 843        new_ni = old_ni;
 844        new_ni.ino = dn->inode->i_ino;
 845
 846        if (!inc_valid_node_count(sbi, dn->inode, 1)) {
 847                err = -ENOSPC;
 848                goto fail;
 849        }
 850        set_node_addr(sbi, &new_ni, NEW_ADDR);
 851        set_cold_node(dn->inode, page);
 852
 853        dn->node_page = page;
 854        if (ipage)
 855                update_inode(dn->inode, ipage);
 856        else
 857                sync_inode_page(dn);
 858        set_page_dirty(page);
 859        if (ofs == 0)
 860                inc_valid_inode_count(sbi);
 861
 862        return page;
 863
 864fail:
 865        clear_node_page_dirty(page);
 866        f2fs_put_page(page, 1);
 867        return ERR_PTR(err);
 868}
 869
 870/*
  871 * Caller should act as follows, depending on the return value:
  872 * 0: call f2fs_put_page(page, 0)
  873 * LOCKED_PAGE: call f2fs_put_page(page, 1)
  874 * error: do nothing
 875 */
 876static int read_node_page(struct page *page, int type)
 877{
 878        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
 879        struct node_info ni;
 880
 881        get_node_info(sbi, page->index, &ni);
 882
 883        if (ni.blk_addr == NULL_ADDR) {
 884                f2fs_put_page(page, 1);
 885                return -ENOENT;
 886        }
 887
 888        if (PageUptodate(page))
 889                return LOCKED_PAGE;
 890
 891        return f2fs_readpage(sbi, page, ni.blk_addr, type);
 892}
 893
 894/*
 895 * Readahead a node page
 896 */
 897void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 898{
 899        struct address_space *mapping = sbi->node_inode->i_mapping;
 900        struct page *apage;
 901        int err;
 902
 903        apage = find_get_page(mapping, nid);
 904        if (apage && PageUptodate(apage)) {
 905                f2fs_put_page(apage, 0);
 906                return;
 907        }
 908        f2fs_put_page(apage, 0);
 909
 910        apage = grab_cache_page(mapping, nid);
 911        if (!apage)
 912                return;
 913
 914        err = read_node_page(apage, READA);
 915        if (err == 0)
 916                f2fs_put_page(apage, 0);
 917        else if (err == LOCKED_PAGE)
 918                f2fs_put_page(apage, 1);
 919        return;
 920}
 921
 922struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
 923{
 924        struct address_space *mapping = sbi->node_inode->i_mapping;
 925        struct page *page;
 926        int err;
 927repeat:
 928        page = grab_cache_page(mapping, nid);
 929        if (!page)
 930                return ERR_PTR(-ENOMEM);
 931
 932        err = read_node_page(page, READ_SYNC);
 933        if (err < 0)
 934                return ERR_PTR(err);
 935        else if (err == LOCKED_PAGE)
 936                goto got_it;
 937
 938        lock_page(page);
 939        if (!PageUptodate(page)) {
 940                f2fs_put_page(page, 1);
 941                return ERR_PTR(-EIO);
 942        }
 943        if (page->mapping != mapping) {
 944                f2fs_put_page(page, 1);
 945                goto repeat;
 946        }
 947got_it:
 948        BUG_ON(nid != nid_of_node(page));
 949        mark_page_accessed(page);
 950        return page;
 951}
 952
 953/*
 954 * Return a locked page for the desired node page.
  955 * Also, readahead up to MAX_RA_NODE sibling node pages.
 956 */
 957struct page *get_node_page_ra(struct page *parent, int start)
 958{
 959        struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
 960        struct address_space *mapping = sbi->node_inode->i_mapping;
 961        struct blk_plug plug;
 962        struct page *page;
 963        int err, i, end;
 964        nid_t nid;
 965
 966        /* First, try getting the desired direct node. */
 967        nid = get_nid(parent, start, false);
 968        if (!nid)
 969                return ERR_PTR(-ENOENT);
 970repeat:
 971        page = grab_cache_page(mapping, nid);
 972        if (!page)
 973                return ERR_PTR(-ENOMEM);
 974
 975        err = read_node_page(page, READ_SYNC);
 976        if (err < 0)
 977                return ERR_PTR(err);
 978        else if (err == LOCKED_PAGE)
 979                goto page_hit;
 980
 981        blk_start_plug(&plug);
 982
 983        /* Then, try readahead for siblings of the desired node */
 984        end = start + MAX_RA_NODE;
 985        end = min(end, NIDS_PER_BLOCK);
 986        for (i = start + 1; i < end; i++) {
 987                nid = get_nid(parent, i, false);
 988                if (!nid)
 989                        continue;
 990                ra_node_page(sbi, nid);
 991        }
 992
 993        blk_finish_plug(&plug);
 994
 995        lock_page(page);
 996        if (page->mapping != mapping) {
 997                f2fs_put_page(page, 1);
 998                goto repeat;
 999        }
1000page_hit:
1001        if (!PageUptodate(page)) {
1002                f2fs_put_page(page, 1);
1003                return ERR_PTR(-EIO);
1004        }
1005        mark_page_accessed(page);
1006        return page;
1007}
1008
1009void sync_inode_page(struct dnode_of_data *dn)
1010{
1011        if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
1012                update_inode(dn->inode, dn->node_page);
1013        } else if (dn->inode_page) {
1014                if (!dn->inode_page_locked)
1015                        lock_page(dn->inode_page);
1016                update_inode(dn->inode, dn->inode_page);
1017                if (!dn->inode_page_locked)
1018                        unlock_page(dn->inode_page);
1019        } else {
1020                update_inode_page(dn->inode);
1021        }
1022}
1023
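     /*
      * Write back dirty node pages. Without @ino, all dirty node pages are
      * flushed in three steps: indirect nodes, then dentry dnodes, then file
      * dnodes. With @ino (the fsync path), dnode pages of that inode are
      * written with fsync/dentry marks set, and their count is returned.
      */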
1024int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1025                                        struct writeback_control *wbc)
1026{
1027        struct address_space *mapping = sbi->node_inode->i_mapping;
1028        pgoff_t index, end;
1029        struct pagevec pvec;
1030        int step = ino ? 2 : 0;
1031        int nwritten = 0, wrote = 0;
1032
1033        pagevec_init(&pvec, 0);
1034
1035next_step:
1036        index = 0;
1037        end = LONG_MAX;
1038
1039        while (index <= end) {
1040                int i, nr_pages;
1041                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1042                                PAGECACHE_TAG_DIRTY,
1043                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1044                if (nr_pages == 0)
1045                        break;
1046
1047                for (i = 0; i < nr_pages; i++) {
1048                        struct page *page = pvec.pages[i];
1049
1050                        /*
1051                         * flushing sequence with step:
1052                         * 0. indirect nodes
1053                         * 1. dentry dnodes
1054                         * 2. file dnodes
1055                         */
1056                        if (step == 0 && IS_DNODE(page))
1057                                continue;
1058                        if (step == 1 && (!IS_DNODE(page) ||
1059                                                is_cold_node(page)))
1060                                continue;
1061                        if (step == 2 && (!IS_DNODE(page) ||
1062                                                !is_cold_node(page)))
1063                                continue;
1064
1065                        /*
 1066                         * In fsync mode,
1067                         * we should not skip writing node pages.
1068                         */
1069                        if (ino && ino_of_node(page) == ino)
1070                                lock_page(page);
1071                        else if (!trylock_page(page))
1072                                continue;
1073
1074                        if (unlikely(page->mapping != mapping)) {
1075continue_unlock:
1076                                unlock_page(page);
1077                                continue;
1078                        }
1079                        if (ino && ino_of_node(page) != ino)
1080                                goto continue_unlock;
1081
1082                        if (!PageDirty(page)) {
1083                                /* someone wrote it for us */
1084                                goto continue_unlock;
1085                        }
1086
1087                        if (!clear_page_dirty_for_io(page))
1088                                goto continue_unlock;
1089
1090                        /* called by fsync() */
1091                        if (ino && IS_DNODE(page)) {
1092                                int mark = !is_checkpointed_node(sbi, ino);
1093                                set_fsync_mark(page, 1);
1094                                if (IS_INODE(page))
1095                                        set_dentry_mark(page, mark);
1096                                nwritten++;
1097                        } else {
1098                                set_fsync_mark(page, 0);
1099                                set_dentry_mark(page, 0);
1100                        }
1101                        mapping->a_ops->writepage(page, wbc);
1102                        wrote++;
1103
1104                        if (--wbc->nr_to_write == 0)
1105                                break;
1106                }
1107                pagevec_release(&pvec);
1108                cond_resched();
1109
1110                if (wbc->nr_to_write == 0) {
1111                        step = 2;
1112                        break;
1113                }
1114        }
1115
1116        if (step < 2) {
1117                step++;
1118                goto next_step;
1119        }
1120
1121        if (wrote)
1122                f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
1123
1124        return nwritten;
1125}
1126
1127static int f2fs_write_node_page(struct page *page,
1128                                struct writeback_control *wbc)
1129{
1130        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1131        nid_t nid;
1132        block_t new_addr;
1133        struct node_info ni;
1134
1135        wait_on_page_writeback(page);
1136
1137        /* get old block addr of this node page */
1138        nid = nid_of_node(page);
1139        BUG_ON(page->index != nid);
1140
1141        get_node_info(sbi, nid, &ni);
1142
1143        /* This page is already truncated */
1144        if (ni.blk_addr == NULL_ADDR) {
1145                dec_page_count(sbi, F2FS_DIRTY_NODES);
1146                unlock_page(page);
1147                return 0;
1148        }
1149
1150        if (wbc->for_reclaim) {
1151                dec_page_count(sbi, F2FS_DIRTY_NODES);
1152                wbc->pages_skipped++;
1153                set_page_dirty(page);
1154                return AOP_WRITEPAGE_ACTIVATE;
1155        }
1156
1157        mutex_lock(&sbi->node_write);
1158        set_page_writeback(page);
1159        write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
1160        set_node_addr(sbi, &ni, new_addr);
1161        dec_page_count(sbi, F2FS_DIRTY_NODES);
1162        mutex_unlock(&sbi->node_write);
1163        unlock_page(page);
1164        return 0;
1165}
1166
1167/*
 1168 * It is very important to gather dirty pages and write them at once, so that
 1169 * we can submit a big bio without interfering with other data writes.
 1170 * By default, 512 pages (2MB), one segment in size, is quite reasonable.
1171 */
1172#define COLLECT_DIRTY_NODES     512
1173static int f2fs_write_node_pages(struct address_space *mapping,
1174                            struct writeback_control *wbc)
1175{
1176        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1177        long nr_to_write = wbc->nr_to_write;
1178
 1179        /* First, balance the number of cached NAT entries */
1180        if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
1181                f2fs_sync_fs(sbi->sb, true);
1182                return 0;
1183        }
1184
 1185        /* collect a number of dirty node pages and write them together */
1186        if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1187                return 0;
1188
 1189        /* if mounting failed, skip writing node pages */
1190        wbc->nr_to_write = max_hw_blocks(sbi);
1191        sync_node_pages(sbi, 0, wbc);
1192        wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
1193        return 0;
1194}
1195
1196static int f2fs_set_node_page_dirty(struct page *page)
1197{
1198        struct address_space *mapping = page->mapping;
1199        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1200
1201        SetPageUptodate(page);
1202        if (!PageDirty(page)) {
1203                __set_page_dirty_nobuffers(page);
1204                inc_page_count(sbi, F2FS_DIRTY_NODES);
1205                SetPagePrivate(page);
1206                return 1;
1207        }
1208        return 0;
1209}
1210
1211static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
1212                                      unsigned int length)
1213{
1214        struct inode *inode = page->mapping->host;
1215        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
1216        if (PageDirty(page))
1217                dec_page_count(sbi, F2FS_DIRTY_NODES);
1218        ClearPagePrivate(page);
1219}
1220
1221static int f2fs_release_node_page(struct page *page, gfp_t wait)
1222{
1223        ClearPagePrivate(page);
1224        return 1;
1225}
1226
1227/*
1228 * Structure of the f2fs node operations
1229 */
1230const struct address_space_operations f2fs_node_aops = {
1231        .writepage      = f2fs_write_node_page,
1232        .writepages     = f2fs_write_node_pages,
1233        .set_page_dirty = f2fs_set_node_page_dirty,
1234        .invalidatepage = f2fs_invalidate_node_page,
1235        .releasepage    = f2fs_release_node_page,
1236};
1237
1238static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
1239{
1240        struct list_head *this;
1241        struct free_nid *i;
1242        list_for_each(this, head) {
1243                i = list_entry(this, struct free_nid, list);
1244                if (i->nid == n)
1245                        return i;
1246        }
1247        return NULL;
1248}
1249
1250static void __del_from_free_nid_list(struct free_nid *i)
1251{
1252        list_del(&i->list);
1253        kmem_cache_free(free_nid_slab, i);
1254}
1255
1256static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
1257{
1258        struct free_nid *i;
1259        struct nat_entry *ne;
1260        bool allocated = false;
1261
1262        if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
1263                return -1;
1264
1265        /* 0 nid should not be used */
1266        if (nid == 0)
1267                return 0;
1268
1269        if (!build)
1270                goto retry;
1271
1272        /* do not add allocated nids */
1273        read_lock(&nm_i->nat_tree_lock);
1274        ne = __lookup_nat_cache(nm_i, nid);
1275        if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
1276                allocated = true;
1277        read_unlock(&nm_i->nat_tree_lock);
1278        if (allocated)
1279                return 0;
1280retry:
1281        i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1282        if (!i) {
1283                cond_resched();
1284                goto retry;
1285        }
1286        i->nid = nid;
1287        i->state = NID_NEW;
1288
1289        spin_lock(&nm_i->free_nid_list_lock);
1290        if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
1291                spin_unlock(&nm_i->free_nid_list_lock);
1292                kmem_cache_free(free_nid_slab, i);
1293                return 0;
1294        }
1295        list_add_tail(&i->list, &nm_i->free_nid_list);
1296        nm_i->fcnt++;
1297        spin_unlock(&nm_i->free_nid_list_lock);
1298        return 1;
1299}
1300
1301static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1302{
1303        struct free_nid *i;
1304        spin_lock(&nm_i->free_nid_list_lock);
1305        i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1306        if (i && i->state == NID_NEW) {
1307                __del_from_free_nid_list(i);
1308                nm_i->fcnt--;
1309        }
1310        spin_unlock(&nm_i->free_nid_list_lock);
1311}
1312
1313static void scan_nat_page(struct f2fs_nm_info *nm_i,
1314                        struct page *nat_page, nid_t start_nid)
1315{
1316        struct f2fs_nat_block *nat_blk = page_address(nat_page);
1317        block_t blk_addr;
1318        int i;
1319
1320        i = start_nid % NAT_ENTRY_PER_BLOCK;
1321
1322        for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1323
1324                if (start_nid >= nm_i->max_nid)
1325                        break;
1326
1327                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1328                BUG_ON(blk_addr == NEW_ADDR);
1329                if (blk_addr == NULL_ADDR) {
1330                        if (add_free_nid(nm_i, start_nid, true) < 0)
1331                                break;
1332                }
1333        }
1334}
1335
1336static void build_free_nids(struct f2fs_sb_info *sbi)
1337{
1338        struct f2fs_nm_info *nm_i = NM_I(sbi);
1339        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1340        struct f2fs_summary_block *sum = curseg->sum_blk;
1341        int i = 0;
1342        nid_t nid = nm_i->next_scan_nid;
1343
1344        /* Enough entries */
1345        if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
1346                return;
1347
1348        /* readahead nat pages to be scanned */
1349        ra_nat_pages(sbi, nid);
1350
1351        while (1) {
1352                struct page *page = get_current_nat_page(sbi, nid);
1353
1354                scan_nat_page(nm_i, page, nid);
1355                f2fs_put_page(page, 1);
1356
1357                nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
1358                if (nid >= nm_i->max_nid)
1359                        nid = 0;
1360
1361                if (i++ == FREE_NID_PAGES)
1362                        break;
1363        }
1364
 1365        /* next time, scan from the next nat page to find free nids abundantly */
1366        nm_i->next_scan_nid = nid;
1367
1368        /* find free nids from current sum_pages */
1369        mutex_lock(&curseg->curseg_mutex);
1370        for (i = 0; i < nats_in_cursum(sum); i++) {
1371                block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
1372                nid = le32_to_cpu(nid_in_journal(sum, i));
1373                if (addr == NULL_ADDR)
1374                        add_free_nid(nm_i, nid, true);
1375                else
1376                        remove_free_nid(nm_i, nid);
1377        }
1378        mutex_unlock(&curseg->curseg_mutex);
1379}
1380
1381/*
 1382 * If this function returns success, the caller can obtain a new nid
 1383 * from the second parameter of this function.
 1384 * The returned nid can be used as an ino as well as a nid when an inode is created.
1385 */
1386bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1387{
1388        struct f2fs_nm_info *nm_i = NM_I(sbi);
1389        struct free_nid *i = NULL;
1390        struct list_head *this;
1391retry:
1392        if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
1393                return false;
1394
1395        spin_lock(&nm_i->free_nid_list_lock);
1396
1397        /* We should not use stale free nids created by build_free_nids */
1398        if (nm_i->fcnt && !sbi->on_build_free_nids) {
1399                BUG_ON(list_empty(&nm_i->free_nid_list));
1400                list_for_each(this, &nm_i->free_nid_list) {
1401                        i = list_entry(this, struct free_nid, list);
1402                        if (i->state == NID_NEW)
1403                                break;
1404                }
1405
1406                BUG_ON(i->state != NID_NEW);
1407                *nid = i->nid;
1408                i->state = NID_ALLOC;
1409                nm_i->fcnt--;
1410                spin_unlock(&nm_i->free_nid_list_lock);
1411                return true;
1412        }
1413        spin_unlock(&nm_i->free_nid_list_lock);
1414
 1415        /* Let's scan the nat pages and their caches to get free nids */
1416        mutex_lock(&nm_i->build_lock);
1417        sbi->on_build_free_nids = 1;
1418        build_free_nids(sbi);
1419        sbi->on_build_free_nids = 0;
1420        mutex_unlock(&nm_i->build_lock);
1421        goto retry;
1422}
1423
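     /*
      * Typical allocation sequence (a sketch; the same pattern is used in
      * get_dnode_of_data() above). alloc_nid() hands out a free nid, and the
      * caller must pair it with alloc_nid_done() on success or
      * alloc_nid_failed() on failure:
      *
      *   if (!alloc_nid(sbi, &nid))
      *           return -ENOSPC;
      *   page = new_node_page(dn, noffset, NULL);
      *   if (IS_ERR(page)) {
      *           alloc_nid_failed(sbi, nid);
      *           return PTR_ERR(page);
      *   }
      *   alloc_nid_done(sbi, nid);
      */
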
1424/*
1425 * alloc_nid() should be called prior to this function.
1426 */
1427void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1428{
1429        struct f2fs_nm_info *nm_i = NM_I(sbi);
1430        struct free_nid *i;
1431
1432        spin_lock(&nm_i->free_nid_list_lock);
1433        i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1434        BUG_ON(!i || i->state != NID_ALLOC);
1435        __del_from_free_nid_list(i);
1436        spin_unlock(&nm_i->free_nid_list_lock);
1437}
1438
1439/*
1440 * alloc_nid() should be called prior to this function.
1441 */
1442void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1443{
1444        struct f2fs_nm_info *nm_i = NM_I(sbi);
1445        struct free_nid *i;
1446
1447        spin_lock(&nm_i->free_nid_list_lock);
1448        i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1449        BUG_ON(!i || i->state != NID_ALLOC);
1450        if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
1451                __del_from_free_nid_list(i);
1452        } else {
1453                i->state = NID_NEW;
1454                nm_i->fcnt++;
1455        }
1456        spin_unlock(&nm_i->free_nid_list_lock);
1457}
1458
1459void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
1460                struct f2fs_summary *sum, struct node_info *ni,
1461                block_t new_blkaddr)
1462{
1463        rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
1464        set_node_addr(sbi, ni, new_blkaddr);
1465        clear_node_page_dirty(page);
1466}
1467
1468int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1469{
1470        struct address_space *mapping = sbi->node_inode->i_mapping;
1471        struct f2fs_node *src, *dst;
1472        nid_t ino = ino_of_node(page);
1473        struct node_info old_ni, new_ni;
1474        struct page *ipage;
1475
1476        ipage = grab_cache_page(mapping, ino);
1477        if (!ipage)
1478                return -ENOMEM;
1479
 1480        /* Should not use this inode from the free nid list */
1481        remove_free_nid(NM_I(sbi), ino);
1482
1483        get_node_info(sbi, ino, &old_ni);
1484        SetPageUptodate(ipage);
1485        fill_node_footer(ipage, ino, ino, 0, true);
1486
1487        src = (struct f2fs_node *)page_address(page);
1488        dst = (struct f2fs_node *)page_address(ipage);
1489
1490        memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
1491        dst->i.i_size = 0;
1492        dst->i.i_blocks = cpu_to_le64(1);
1493        dst->i.i_links = cpu_to_le32(1);
1494        dst->i.i_xattr_nid = 0;
1495
1496        new_ni = old_ni;
1497        new_ni.ino = ino;
1498
1499        if (!inc_valid_node_count(sbi, NULL, 1))
1500                WARN_ON(1);
1501        set_node_addr(sbi, &new_ni, NEW_ADDR);
1502        inc_valid_inode_count(sbi);
1503        f2fs_put_page(ipage, 1);
1504        return 0;
1505}
1506
1507int restore_node_summary(struct f2fs_sb_info *sbi,
1508                        unsigned int segno, struct f2fs_summary_block *sum)
1509{
1510        struct f2fs_node *rn;
1511        struct f2fs_summary *sum_entry;
1512        struct page *page;
1513        block_t addr;
1514        int i, last_offset;
1515
 1516        /* allocate a temporary page for reading node blocks */
 1517        page = alloc_page(GFP_NOFS | __GFP_ZERO);
 1518        if (!page)
 1519                return -ENOMEM;
1520        lock_page(page);
1521
1522        /* scan the node segment */
1523        last_offset = sbi->blocks_per_seg;
1524        addr = START_BLOCK(sbi, segno);
1525        sum_entry = &sum->entries[0];
1526
1527        for (i = 0; i < last_offset; i++, sum_entry++) {
1528                /*
 1529                 * In order to read the next node page,
 1530                 * we must clear the PageUptodate flag.
1531                 */
1532                ClearPageUptodate(page);
1533
1534                if (f2fs_readpage(sbi, page, addr, READ_SYNC))
1535                        goto out;
1536
1537                lock_page(page);
1538                rn = (struct f2fs_node *)page_address(page);
1539                sum_entry->nid = rn->footer.nid;
1540                sum_entry->version = 0;
1541                sum_entry->ofs_in_node = 0;
1542                addr++;
1543        }
1544        unlock_page(page);
1545out:
1546        __free_pages(page, 0);
1547        return 0;
1548}
1549
1550static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1551{
1552        struct f2fs_nm_info *nm_i = NM_I(sbi);
1553        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1554        struct f2fs_summary_block *sum = curseg->sum_blk;
1555        int i;
1556
1557        mutex_lock(&curseg->curseg_mutex);
1558
1559        if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1560                mutex_unlock(&curseg->curseg_mutex);
1561                return false;
1562        }
1563
1564        for (i = 0; i < nats_in_cursum(sum); i++) {
1565                struct nat_entry *ne;
1566                struct f2fs_nat_entry raw_ne;
1567                nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1568
1569                raw_ne = nat_in_journal(sum, i);
1570retry:
1571                write_lock(&nm_i->nat_tree_lock);
1572                ne = __lookup_nat_cache(nm_i, nid);
1573                if (ne) {
1574                        __set_nat_cache_dirty(nm_i, ne);
1575                        write_unlock(&nm_i->nat_tree_lock);
1576                        continue;
1577                }
1578                ne = grab_nat_entry(nm_i, nid);
1579                if (!ne) {
1580                        write_unlock(&nm_i->nat_tree_lock);
1581                        goto retry;
1582                }
1583                nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
1584                nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
1585                nat_set_version(ne, raw_ne.version);
1586                __set_nat_cache_dirty(nm_i, ne);
1587                write_unlock(&nm_i->nat_tree_lock);
1588        }
1589        update_nats_in_cursum(sum, -i);
1590        mutex_unlock(&curseg->curseg_mutex);
1591        return true;
1592}
1593
1594/*
1595 * Flush dirty NAT entries to the journal or NAT blocks during checkpoint.
1596 */
1597void flush_nat_entries(struct f2fs_sb_info *sbi)
1598{
1599        struct f2fs_nm_info *nm_i = NM_I(sbi);
1600        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1601        struct f2fs_summary_block *sum = curseg->sum_blk;
1602        struct list_head *cur, *n;
1603        struct page *page = NULL;
1604        struct f2fs_nat_block *nat_blk = NULL;
1605        nid_t start_nid = 0, end_nid = 0;
1606        bool flushed;
1607
1608        flushed = flush_nats_in_journal(sbi);
1609
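            /*
             * If the journal was just flushed, every dirty entry is written
             * directly to a NAT block below, so the summary journal and its
             * mutex are not needed.
             */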
1610        if (!flushed)
1611                mutex_lock(&curseg->curseg_mutex);
1612
1613        /* 1) flush dirty nat caches */
1614        list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1615                struct nat_entry *ne;
1616                nid_t nid;
1617                struct f2fs_nat_entry raw_ne;
1618                int offset = -1;
1619                block_t new_blkaddr;
1620
1621                ne = list_entry(cur, struct nat_entry, list);
1622                nid = nat_get_nid(ne);
1623
1624                if (nat_get_blkaddr(ne) == NEW_ADDR)
1625                        continue;
1626                if (flushed)
1627                        goto to_nat_page;
1628
1629                /* if there is room for nat entries in the curseg summary page */
1630                offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1631                if (offset >= 0) {
1632                        raw_ne = nat_in_journal(sum, offset);
1633                        goto flush_now;
1634                }
1635to_nat_page:
1636                if (!page || (start_nid > nid || nid > end_nid)) {
1637                        if (page) {
1638                                f2fs_put_page(page, 1);
1639                                page = NULL;
1640                        }
1641                        start_nid = START_NID(nid);
1642                        end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1643
1644                        /*
1645                         * get the nat block page: dirtied, with an elevated
1646                         * reference count, mapped and locked
1647                         */
1648                        page = get_next_nat_page(sbi, start_nid);
1649                        nat_blk = page_address(page);
1650                }
1651
1652                BUG_ON(!nat_blk);
1653                raw_ne = nat_blk->entries[nid - start_nid];
1654flush_now:
1655                new_blkaddr = nat_get_blkaddr(ne);
1656
1657                raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1658                raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1659                raw_ne.version = nat_get_version(ne);
1660
1661                if (offset < 0) {
1662                        nat_blk->entries[nid - start_nid] = raw_ne;
1663                } else {
1664                        nat_in_journal(sum, offset) = raw_ne;
1665                        nid_in_journal(sum, offset) = cpu_to_le32(nid);
1666                }
1667
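                    /*
                     * A NULL_ADDR means the node was deleted: try to remember
                     * the nid as a free nid and, failing that, drop the NAT
                     * cache entry. Otherwise keep the entry, now clean and
                     * marked as checkpointed.
                     */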
1668                if (nat_get_blkaddr(ne) == NULL_ADDR &&
1669                                add_free_nid(NM_I(sbi), nid, false) <= 0) {
1670                        write_lock(&nm_i->nat_tree_lock);
1671                        __del_from_nat_cache(nm_i, ne);
1672                        write_unlock(&nm_i->nat_tree_lock);
1673                } else {
1674                        write_lock(&nm_i->nat_tree_lock);
1675                        __clear_nat_cache_dirty(nm_i, ne);
1676                        ne->checkpointed = true;
1677                        write_unlock(&nm_i->nat_tree_lock);
1678                }
1679        }
1680        if (!flushed)
1681                mutex_unlock(&curseg->curseg_mutex);
1682        f2fs_put_page(page, 1);
1683
1684        /* 2) shrink nat caches if necessary */
1685        try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1686}
1687
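    /*
     * Initialize the node manager: compute max_nid from the on-disk NAT
     * layout, set up the free nid and NAT cache structures, and copy the
     * NAT version bitmap from the checkpoint.
     */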
1688static int init_node_manager(struct f2fs_sb_info *sbi)
1689{
1690        struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1691        struct f2fs_nm_info *nm_i = NM_I(sbi);
1692        unsigned char *version_bitmap;
1693        unsigned int nat_segs, nat_blocks;
1694
1695        nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1696
1697        /* segment_count_nat counts a pair of segments, so divide it by 2. */
1698        nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1699        nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
1700        nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1701        nm_i->fcnt = 0;
1702        nm_i->nat_cnt = 0;
1703
1704        INIT_LIST_HEAD(&nm_i->free_nid_list);
1705        INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1706        INIT_LIST_HEAD(&nm_i->nat_entries);
1707        INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1708
1709        mutex_init(&nm_i->build_lock);
1710        spin_lock_init(&nm_i->free_nid_list_lock);
1711        rwlock_init(&nm_i->nat_tree_lock);
1712
1713        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1714        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1715        version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1716        if (!version_bitmap)
1717                return -EFAULT;
1718
1719        nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1720                                        GFP_KERNEL);
1721        if (!nm_i->nat_bitmap)
1722                return -ENOMEM;
1723        return 0;
1724}
1725
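    /*
     * Allocate and initialize the node manager, then build an initial
     * free nid list for later node allocations.
     */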
1726int build_node_manager(struct f2fs_sb_info *sbi)
1727{
1728        int err;
1729
1730        sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1731        if (!sbi->nm_info)
1732                return -ENOMEM;
1733
1734        err = init_node_manager(sbi);
1735        if (err)
1736                return err;
1737
1738        build_free_nids(sbi);
1739        return 0;
1740}
1741
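    /*
     * Tear down the node manager: release all cached free nids and NAT
     * entries, and free the NAT bitmap copy.
     */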
1742void destroy_node_manager(struct f2fs_sb_info *sbi)
1743{
1744        struct f2fs_nm_info *nm_i = NM_I(sbi);
1745        struct free_nid *i, *next_i;
1746        struct nat_entry *natvec[NATVEC_SIZE];
1747        nid_t nid = 0;
1748        unsigned int found;
1749
1750        if (!nm_i)
1751                return;
1752
1753        /* destroy free nid list */
1754        spin_lock(&nm_i->free_nid_list_lock);
1755        list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1756                BUG_ON(i->state == NID_ALLOC);
1757                __del_from_free_nid_list(i);
1758                nm_i->fcnt--;
1759        }
1760        BUG_ON(nm_i->fcnt);
1761        spin_unlock(&nm_i->free_nid_list_lock);
1762
1763        /* destroy nat cache */
1764        write_lock(&nm_i->nat_tree_lock);
1765        while ((found = __gang_lookup_nat_cache(nm_i,
1766                                        nid, NATVEC_SIZE, natvec))) {
1767                unsigned idx;
1768                for (idx = 0; idx < found; idx++) {
1769                        struct nat_entry *e = natvec[idx];
1770                        nid = nat_get_nid(e) + 1;
1771                        __del_from_nat_cache(nm_i, e);
1772                }
1773        }
1774        BUG_ON(nm_i->nat_cnt);
1775        write_unlock(&nm_i->nat_tree_lock);
1776
1777        kfree(nm_i->nat_bitmap);
1778        sbi->nm_info = NULL;
1779        kfree(nm_i);
1780}
1781
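    /* Create the slab caches used for NAT cache entries and free nids. */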
1782int __init create_node_manager_caches(void)
1783{
1784        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1785                        sizeof(struct nat_entry), NULL);
1786        if (!nat_entry_slab)
1787                return -ENOMEM;
1788
1789        free_nid_slab = f2fs_kmem_cache_create("free_nid",
1790                        sizeof(struct free_nid), NULL);
1791        if (!free_nid_slab) {
1792                kmem_cache_destroy(nat_entry_slab);
1793                return -ENOMEM;
1794        }
1795        return 0;
1796}
1797
1798void destroy_node_manager_caches(void)
1799{
1800        kmem_cache_destroy(free_nid_slab);
1801        kmem_cache_destroy(nat_entry_slab);
1802}
1803