// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | dnode(F) | inode(DF)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(x)
 * -> May be not recoverable.
 */
static struct kmem_cache *fsync_entry_slab;

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
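
/*
 * Grab the inode with the given ino via f2fs_iget_retry() and track it on
 * the fsync inode list.  For an inode created after the last checkpoint,
 * quota_inode is true and the new inode is charged to the owner's quota.
 */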
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
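
/*
 * Rebuild an f2fs_filename from the name recorded in the on-disk inode so
 * that the dentry can be looked up again.  For casefolded directories the
 * hash is computed over the casefolded name, while the match itself stays
 * case-sensitive during recovery.
 */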
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kfree(fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}
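
/*
 * Re-link the recovered inode into its parent directory.  If a stale
 * dentry with the same name already points at a different inode, that
 * inode is deleted as an orphan and the lookup is retried.
 */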
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
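
/*
 * Transfer the inode's quota charges to the uid/gid recorded in the node
 * page, in case the owner changed after the last checkpoint.
 */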
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
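
/*
 * Copy the metadata (mode, uid/gid, project id, size, timestamps, flags)
 * from the recovered node page into the in-memory inode.
 */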
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}
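
/*
 * Walk the warm node chain written after the last checkpoint and collect
 * every inode that has a fsync'd dnode.  With check_only set, the walk
 * only reports whether there is anything to recover.
 */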
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
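
/*
 * A still-valid block at @blkaddr is referenced by an older node.  Find
 * that node through the segment summary and truncate the stale index so
 * the block can be reused for the recovered data.
 */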
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
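
/*
 * Replay one fsync'd node page: restore xattrs and inline data, then walk
 * every data index in the page and make the on-disk block addresses match
 * the recorded ones.
 */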
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}
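
/*
 * Second pass over the warm node chain: for every inode collected by
 * find_fsync_dnodes(), replay its node pages in order, recovering the
 * inode metadata, the directory entry and the data indices.
 */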
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
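
/*
 * Entry point for roll-forward recovery, called at mount time.  Returns
 * a negative errno on failure; with check_only set, it returns 1 when
 * there is fsync data to recover and 0 when there is none.
 */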
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsync node pages */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the filesystem is writable on a zoned block device, check
	 * and fix the write pointers of the current segments.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */

	return ret ? ret : err;
}