// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

#ifdef CONFIG_UNICODE
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

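/*
 * Check that the main area still has room for the blocks roll forward
 * recovery may allocate: the blocks allocated since the last checkpoint
 * must not push the total past the user-visible block count.
 */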
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

        if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
                return false;
        return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                                nid_t ino)
{
        struct fsync_inode_entry *entry;

        list_for_each_entry(entry, head, list)
                if (entry->inode->i_ino == ino)
                        return entry;

        return NULL;
}

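/*
 * Grab the in-memory inode of @ino and queue it on @head. @quota_inode
 * is true when the inode page itself was recovered first (see
 * find_fsync_dnodes()), so the new inode must be charged to quota here.
 */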
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
                        struct list_head *head, nid_t ino, bool quota_inode)
{
        struct inode *inode;
        struct fsync_inode_entry *entry;
        int err;

        inode = f2fs_iget_retry(sbi->sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        err = dquot_initialize(inode);
        if (err)
                goto err_out;

        if (quota_inode) {
                err = dquot_alloc_inode(inode);
                if (err)
                        goto err_out;
        }

        entry = f2fs_kmem_cache_alloc(fsync_entry_slab,
                                        GFP_F2FS_ZERO, true, NULL);
        entry->inode = inode;
        list_add_tail(&entry->list, head);

        return entry;
err_out:
        iput(inode);
        return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
        if (drop) {
                /* inode should not be recovered, drop it */
                f2fs_inode_synced(entry->inode);
        }
        iput(entry->inode);
        list_del(&entry->list);
        kmem_cache_free(fsync_entry_slab, entry);
}

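/*
 * Rebuild a struct f2fs_filename from the name stored in the raw on-disk
 * inode, so the dentry can be looked up and re-added. For an encrypted
 * and casefolded directory the dentry hash cannot be recomputed without
 * the key, so the hash stored on disk after the name is reused instead.
 */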
static int init_recovered_filename(const struct inode *dir,
                                   struct f2fs_inode *raw_inode,
                                   struct f2fs_filename *fname,
                                   struct qstr *usr_fname)
{
        int err;

        memset(fname, 0, sizeof(*fname));
        fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
        fname->disk_name.name = raw_inode->i_name;

        if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
                return -ENAMETOOLONG;

        if (!IS_ENCRYPTED(dir)) {
                usr_fname->name = fname->disk_name.name;
                usr_fname->len = fname->disk_name.len;
                fname->usr_fname = usr_fname;
        }

        /* Compute the hash of the filename */
        if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
                /*
                 * In this case the hash isn't computable without the key, so it
                 * was saved on-disk.
                 */
                if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
                        return -EINVAL;
                fname->hash = get_unaligned((f2fs_hash_t *)
                                &raw_inode->i_name[fname->disk_name.len]);
        } else if (IS_CASEFOLDED(dir)) {
                err = f2fs_init_casefolded_name(dir, fname);
                if (err)
                        return err;
                f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
                /* Case-sensitive match is fine for recovery */
                kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
                fname->cf_name.name = NULL;
#endif
        } else {
                f2fs_hash_filename(dir, fname);
        }
        return 0;
}

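/*
 * Re-link a fsynced inode into its parent directory. If another inode
 * still owns a dentry with the same name, that stale entry is removed
 * (via the orphan mechanism) and the lookup is retried before the new
 * dentry is added.
 */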
static int recover_dentry(struct inode *inode, struct page *ipage,
                                                struct list_head *dir_list)
{
        struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
        struct f2fs_filename fname;
        struct qstr usr_fname;
        struct page *page;
        struct inode *dir, *einode;
        struct fsync_inode_entry *entry;
        int err = 0;
        char *name;

        entry = get_fsync_inode(dir_list, pino);
        if (!entry) {
                entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
                                                        pino, false);
                if (IS_ERR(entry)) {
                        dir = ERR_CAST(entry);
                        err = PTR_ERR(entry);
                        goto out;
                }
        }

        dir = entry->inode;
        err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
        if (err)
                goto out;
retry:
        de = __f2fs_find_entry(dir, &fname, &page);
        if (de && inode->i_ino == le32_to_cpu(de->ino))
                goto out_put;

        if (de) {
                einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        err = PTR_ERR(einode);
                        if (err == -ENOENT)
                                err = -EEXIST;
                        goto out_put;
                }

                err = dquot_initialize(einode);
                if (err) {
                        iput(einode);
                        goto out_put;
                }

                err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
                if (err) {
                        iput(einode);
                        goto out_put;
                }
                f2fs_delete_entry(de, page, dir, einode);
                iput(einode);
                goto retry;
        } else if (IS_ERR(page)) {
                err = PTR_ERR(page);
        } else {
                err = f2fs_add_dentry(dir, &fname, inode,
                                        inode->i_ino, inode->i_mode);
        }
        if (err == -ENOMEM)
                goto retry;
        goto out;

out_put:
        f2fs_put_page(page, 0);
out:
        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = raw_inode->i_name;
        f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
                    __func__, ino_of_node(ipage), name,
                    IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
}

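/*
 * If the recovered uid/gid differs from what the in-memory inode carries,
 * transfer the quota charges to the new owner before recover_inode()
 * overwrites the ids.
 */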
static int recover_quota_data(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        struct iattr attr;
        uid_t i_uid = le32_to_cpu(raw->i_uid);
        gid_t i_gid = le32_to_cpu(raw->i_gid);
        int err;

        memset(&attr, 0, sizeof(attr));

        attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
        attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

        if (!uid_eq(attr.ia_uid, inode->i_uid))
                attr.ia_valid |= ATTR_UID;
        if (!gid_eq(attr.ia_gid, inode->i_gid))
                attr.ia_valid |= ATTR_GID;

        if (!attr.ia_valid)
                return 0;

        err = dquot_transfer(inode, &attr);
        if (err)
                set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
        return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
        if (ri->i_inline & F2FS_PIN_FILE)
                set_inode_flag(inode, FI_PIN_FILE);
        else
                clear_inode_flag(inode, FI_PIN_FILE);
        if (ri->i_inline & F2FS_DATA_EXIST)
                set_inode_flag(inode, FI_DATA_EXIST);
        else
                clear_inode_flag(inode, FI_DATA_EXIST);
}

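/*
 * Copy the recoverable metadata (mode, uid/gid, project id, size, times,
 * flags) from the raw inode in @page into the in-memory inode and mark it
 * dirty, so the latest inode(x) from scenario #1 above is not lost.
 */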
static int recover_inode(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        char *name;
        int err;

        inode->i_mode = le16_to_cpu(raw->i_mode);

        err = recover_quota_data(inode, page);
        if (err)
                return err;

        i_uid_write(inode, le32_to_cpu(raw->i_uid));
        i_gid_write(inode, le32_to_cpu(raw->i_gid));

        if (raw->i_inline & F2FS_EXTRA_ATTR) {
                if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
                                                                i_projid)) {
                        projid_t i_projid;
                        kprojid_t kprojid;

                        i_projid = (projid_t)le32_to_cpu(raw->i_projid);
                        kprojid = make_kprojid(&init_user_ns, i_projid);

                        if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
                                err = f2fs_transfer_project_quota(inode,
                                                                kprojid);
                                if (err)
                                        return err;
                                F2FS_I(inode)->i_projid = kprojid;
                        }
                }
        }

        f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

        F2FS_I(inode)->i_advise = raw->i_advise;
        F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
        f2fs_set_inode_flags(inode);
        F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
                                le16_to_cpu(raw->i_gc_failures);

        recover_inline_flags(inode, raw);

        f2fs_mark_inode_dirty_sync(inode, true);

        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = F2FS_INODE(page)->i_name;

        f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
                    ino_of_node(page), name, raw->i_inline);
        return 0;
}

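/*
 * Recovery pass #1: walk the warm node chain starting right after the last
 * checkpointed position and build one fsync_inode_entry per inode that owns
 * a fsync-marked dnode. With @check_only set, only the existence of
 * recoverable data is reported and no inode page is recovered.
 */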
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
                                bool check_only)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        block_t blkaddr;
        unsigned int loop_cnt = 0;
        unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
                                                valid_user_blocks(sbi);
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        return 0;

                page = f2fs_get_tmp_page(sbi, blkaddr);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        break;
                }

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry) {
                        bool quota_inode = false;

                        if (!check_only &&
                                        IS_INODE(page) && is_dent_dnode(page)) {
                                err = f2fs_recover_inode_page(sbi, page);
                                if (err) {
                                        f2fs_put_page(page, 1);
                                        break;
                                }
                                quota_inode = true;
                        }

                        /*
                         * CP | dnode(F) | inode(DF)
                         * For this case, we should not give up now.
                         */
                        entry = add_fsync_inode(sbi, head, ino_of_node(page),
                                                                quota_inode);
                        if (IS_ERR(entry)) {
                                err = PTR_ERR(entry);
                                if (err == -ENOENT) {
                                        err = 0;
                                        goto next;
                                }
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                entry->blkaddr = blkaddr;

                if (IS_INODE(page) && is_dent_dnode(page))
                        entry->last_dentry = blkaddr;
next:
                /* sanity check in order to detect looped node chain */
                if (++loop_cnt >= free_blocks ||
                        blkaddr == next_blkaddr_of_node(page)) {
                        f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
                                    __func__, blkaddr,
                                    next_blkaddr_of_node(page));
                        f2fs_put_page(page, 1);
                        err = -EINVAL;
                        break;
                }

                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                f2fs_ra_meta_pages_cond(sbi, blkaddr);
        }
        return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list)
                del_fsync_inode(entry, drop);
}

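/*
 * @blkaddr is still marked valid in its segment, which means some node
 * written before the one being replayed may reference it. Look that node
 * up through the segment summary and, if it still points at @blkaddr,
 * truncate the block there so the address can be reused for recovery.
 */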
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                        block_t blkaddr, struct dnode_of_data *dn)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
        struct f2fs_summary_block *sum_node;
        struct f2fs_summary sum;
        struct page *sum_page, *node_page;
        struct dnode_of_data tdn = *dn;
        nid_t ino, nid;
        struct inode *inode;
        unsigned int offset;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return 0;

        /* Get the previous summary */
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);

                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        goto got_it;
                }
        }

        sum_page = f2fs_get_sum_page(sbi, segno);
        if (IS_ERR(sum_page))
                return PTR_ERR(sum_page);
        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
        sum = sum_node->entries[blkoff];
        f2fs_put_page(sum_page, 1);
got_it:
        /* Use the locked dnode page and inode */
        nid = le32_to_cpu(sum.nid);
        if (dn->inode->i_ino == nid) {
                tdn.nid = nid;
                if (!dn->inode_page_locked)
                        lock_page(dn->inode_page);
                tdn.node_page = dn->inode_page;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                goto truncate_out;
        } else if (dn->nid == nid) {
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                goto truncate_out;
        }

        /* Get the node page */
        node_page = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        if (ino != dn->inode->i_ino) {
                int ret;

                /* Deallocate previous index in the node page */
                inode = f2fs_iget_retry(sbi->sb, ino);
                if (IS_ERR(inode))
                        return PTR_ERR(inode);

                ret = dquot_initialize(inode);
                if (ret) {
                        iput(inode);
                        return ret;
                }
        } else {
                inode = dn->inode;
        }

        bidx = f2fs_start_bidx_of_node(offset, inode) +
                                le16_to_cpu(sum.ofs_in_node);

        /*
         * If the inode page is locked, unlock it temporarily; the
         * reference count on the inode is still held.
         */
        if (ino == dn->inode->i_ino && dn->inode_page_locked)
                unlock_page(dn->inode_page);

        set_new_dnode(&tdn, inode, NULL, NULL, 0);
        if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
                goto out;

        if (tdn.data_blkaddr == blkaddr)
                f2fs_truncate_data_blocks_range(&tdn, 1);

        f2fs_put_dnode(&tdn);
out:
        if (ino != dn->inode->i_ino)
                iput(inode);
        else if (dn->inode_page_locked)
                lock_page(dn->inode_page);
        return 0;

truncate_out:
        if (f2fs_data_blkaddr(&tdn) == blkaddr)
                f2fs_truncate_data_blocks_range(&tdn, 1);
        if (dn->inode->i_ino == nid && !dn->inode_page_locked)
                unlock_page(dn->inode_page);
        return 0;
}

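/*
 * Replay a single fsynced dnode: recover xattr and inline data first, then
 * walk every data index covered by @page and bring the on-disk dnode in
 * line with it, truncating, reserving, or replacing blocks as needed.
 */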
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page)
{
        struct dnode_of_data dn;
        struct node_info ni;
        unsigned int start, end;
        int err = 0, recovered = 0;

        /* step 1: recover xattr */
        if (IS_INODE(page)) {
                err = f2fs_recover_inline_xattr(inode, page);
                if (err)
                        goto out;
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                err = f2fs_recover_xattr_data(inode, page);
                if (!err)
                        recovered++;
                goto out;
        }

        /* step 2: recover inline data */
        err = f2fs_recover_inline_data(inode, page);
        if (err) {
                if (err == 1)
                        err = 0;
                goto out;
        }

        /* step 3: recover data indices */
        start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
        end = start + ADDRS_PER_PAGE(page, inode);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
        err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                if (err == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
                        goto retry_dn;
                }
                goto out;
        }

        f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

        err = f2fs_get_node_info(sbi, dn.nid, &ni);
        if (err)
                goto err;

        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

        if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
                f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
                          inode->i_ino, ofs_of_node(dn.node_page),
                          ofs_of_node(page));
                err = -EFSCORRUPTED;
                goto err;
        }

        for (; start < end; start++, dn.ofs_in_node++) {
                block_t src, dest;

                src = f2fs_data_blkaddr(&dn);
                dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

                if (__is_valid_data_blkaddr(src) &&
                        !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
                        err = -EFSCORRUPTED;
                        goto err;
                }

                if (__is_valid_data_blkaddr(dest) &&
                        !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
                        err = -EFSCORRUPTED;
                        goto err;
                }

                /* skip recovering if dest is the same as src */
                if (src == dest)
                        continue;

                /* dest is invalid, just invalidate src block */
                if (dest == NULL_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        continue;
                }

                if (!file_keep_isize(inode) &&
                        (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
                        f2fs_i_size_write(inode,
                                (loff_t)(start + 1) << PAGE_SHIFT);

                /*
                 * dest is a reserved block: invalidate the src block
                 * and then reserve one new block in the dnode page.
                 */
                if (dest == NEW_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        f2fs_reserve_new_block(&dn);
                        continue;
                }

                /* dest is valid block, try to recover from src to dest */
                if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

                        if (src == NULL_ADDR) {
                                err = f2fs_reserve_new_block(&dn);
                                while (err &&
                                       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
                                        err = f2fs_reserve_new_block(&dn);
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(sbi, err);
                                if (err)
                                        goto err;
                        }
retry_prev:
                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err) {
                                if (err == -ENOMEM) {
                                        congestion_wait(BLK_RW_ASYNC,
                                                        DEFAULT_IO_TIMEOUT);
                                        goto retry_prev;
                                }
                                goto err;
                        }

                        /* write dummy data page */
                        f2fs_replace_block(sbi, &dn, src, dest,
                                                ni.version, false, false);
                        recovered++;
                }
        }

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
err:
        f2fs_put_dnode(&dn);
out:
        f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
                    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
                    recovered, err);
        return err;
}

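/*
 * Recovery pass #2: walk the same warm node chain again and, for every
 * dnode belonging to a collected inode, replay the inode metadata, its
 * dentry, and its data block addresses.
 */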
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
                struct list_head *tmp_inode_list, struct list_head *dir_list)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        int err = 0;
        block_t blkaddr;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        break;

                f2fs_ra_meta_pages_cond(sbi, blkaddr);

                page = f2fs_get_tmp_page(sbi, blkaddr);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        break;
                }

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                entry = get_fsync_inode(inode_list, ino_of_node(page));
                if (!entry)
                        goto next;
                /*
                 * inode(x) | CP | inode(x) | dnode(F)
                 * In this case, we can lose the latest inode(x).
                 * So, call recover_inode for the inode update.
                 */
                if (IS_INODE(page)) {
                        err = recover_inode(entry->inode, page);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                if (entry->last_dentry == blkaddr) {
                        err = recover_dentry(entry->inode, page, dir_list);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                err = do_recover_data(sbi, entry->inode, page);
                if (err) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (entry->blkaddr == blkaddr)
                        list_move_tail(&entry->list, tmp_inode_list);
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);
        }
        if (!err)
                f2fs_allocate_new_segments(sbi);
        return err;
}

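/*
 * Entry point of roll forward recovery, called at mount time. Returns 1
 * if @check_only found recoverable data, 0 on success or when there is
 * nothing to recover, and a negative errno on failure. On successful
 * recovery a CP_RECOVERY checkpoint is written to make the result durable.
 */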
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
        struct list_head inode_list, tmp_inode_list;
        struct list_head dir_list;
        int err;
        int ret = 0;
        unsigned long s_flags = sbi->sb->s_flags;
        bool need_writecp = false;
        bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
        int quota_enabled;
#endif

        if (s_flags & SB_RDONLY) {
                f2fs_info(sbi, "recover fsync data on readonly fs");
                sbi->sb->s_flags &= ~SB_RDONLY;
        }

#ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
        sbi->sb->s_flags |= SB_ACTIVE;
        /* Turn on quotas so that they are updated correctly */
        quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

        INIT_LIST_HEAD(&inode_list);
        INIT_LIST_HEAD(&tmp_inode_list);
        INIT_LIST_HEAD(&dir_list);

        /* prevent checkpoint */
        down_write(&sbi->cp_global_sem);

        /* step #1: find fsynced inode numbers */
        err = find_fsync_dnodes(sbi, &inode_list, check_only);
        if (err || list_empty(&inode_list))
                goto skip;

        if (check_only) {
                ret = 1;
                goto skip;
        }

        need_writecp = true;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
        if (!err)
                f2fs_bug_on(sbi, !list_empty(&inode_list));
        else {
                /* restore s_flags to let iput() trash data */
                sbi->sb->s_flags = s_flags;
        }
skip:
        fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

        destroy_fsync_dnodes(&inode_list, err);
        destroy_fsync_dnodes(&tmp_inode_list, err);

        /* truncate meta pages that were used by the recovery */
        truncate_inode_pages_range(META_MAPPING(sbi),
                        (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

        if (err) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        }

        /*
         * If recovering fsync data succeeded or there was no fsync data to
         * recover, and the filesystem is not read-only, check and fix the
         * write pointer consistency of zoned block devices.
         */
        if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
                        f2fs_sb_has_blkzoned(sbi)) {
                err = f2fs_fix_curseg_write_pointer(sbi);
                ret = err;
        }

        if (!err)
                clear_sbi_flag(sbi, SBI_POR_DOING);

        up_write(&sbi->cp_global_sem);

        /* let's drop all the directory inodes for clean checkpoint */
        destroy_fsync_dnodes(&dir_list, err);

        if (need_writecp) {
                set_sbi_flag(sbi, SBI_IS_RECOVERED);

                if (!err) {
                        struct cp_control cpc = {
                                .reason = CP_RECOVERY,
                        };
                        err = f2fs_write_checkpoint(sbi, &cpc);
                }
        }

#ifdef CONFIG_QUOTA
        /* Turn quotas off */
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
#endif
        sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

        return ret ? ret : err;
}

int __init f2fs_create_recovery_cache(void)
{
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                                        sizeof(struct fsync_inode_entry));
        if (!fsync_entry_slab)
                return -ENOMEM;
        return 0;
}

void f2fs_destroy_recovery_cache(void)
{
        kmem_cache_destroy(fsync_entry_slab);
}