linux/fs/f2fs/checkpoint.c
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page = NULL;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        f2fs_wait_on_page_writeback(page, META);
        SetPageUptodate(page);
        return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
                .rw = READ_SYNC | REQ_META | REQ_PRIO,
                .blk_addr = index,
                .encrypted_page = NULL,
        };
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        if (PageUptodate(page))
                goto out;

        fio.page = page;

        if (f2fs_submit_page_bio(&fio)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }

        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }

        /*
         * If there is any IO error when accessing the device, make the
         * filesystem read-only and make sure the checkpoint is never
         * written with a non-uptodate meta page.
         */
        if (unlikely(!PageUptodate(page)))
                f2fs_stop_checkpoint(sbi);
out:
        return page;
}

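/*
 * Check that a meta block address falls within the on-disk range that is
 * valid for the given meta area type (NAT/SIT/SSA/CP/POR).
 */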
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
        switch (type) {
        case META_NAT:
                break;
        case META_SIT:
                if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
                        return false;
                break;
        case META_SSA:
                if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
                        blkaddr < SM_I(sbi)->ssa_blkaddr))
                        return false;
                break;
        case META_CP:
                if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
                        blkaddr < __start_cp_addr(sbi)))
                        return false;
                break;
        case META_POR:
                if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
                        blkaddr < MAIN_BLKADDR(sbi)))
                        return false;
                break;
        default:
                BUG();
        }

        return true;
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
{
        block_t prev_blk_addr = 0;
        struct page *page;
        block_t blkno = start;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
                .rw = READ_SYNC | REQ_META | REQ_PRIO,
                .encrypted_page = NULL,
        };

        for (; nrpages-- > 0; blkno++) {

                if (!is_valid_blkaddr(sbi, blkno, type))
                        goto out;

                switch (type) {
                case META_NAT:
                        if (unlikely(blkno >=
                                        NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
                                blkno = 0;
                        /* get nat block addr */
                        fio.blk_addr = current_nat_addr(sbi,
                                        blkno * NAT_ENTRY_PER_BLOCK);
                        break;
                case META_SIT:
                        /* get sit block addr */
                        fio.blk_addr = current_sit_addr(sbi,
                                        blkno * SIT_ENTRY_PER_BLOCK);
                        if (blkno != start && prev_blk_addr + 1 != fio.blk_addr)
                                goto out;
                        prev_blk_addr = fio.blk_addr;
                        break;
                case META_SSA:
                case META_CP:
                case META_POR:
                        fio.blk_addr = blkno;
                        break;
                default:
                        BUG();
                }

                page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr);
                if (!page)
                        continue;
                if (PageUptodate(page)) {
                        f2fs_put_page(page, 1);
                        continue;
                }

                fio.page = page;
                f2fs_submit_page_mbio(&fio);
                f2fs_put_page(page, 0);
        }
out:
        f2fs_submit_merged_bio(sbi, META, READ);
        return blkno - start;
}

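/*
 * Readahead a window of META_POR pages around @index only when that page is
 * not already cached and uptodate.
 */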
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct page *page;
        bool readahead = false;

        page = find_get_page(META_MAPPING(sbi), index);
        if (!page || !PageUptodate(page))
                readahead = true;
        f2fs_put_page(page, 0);

        if (readahead)
                ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
}

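/*
 * Write back a single dirty meta page; redirty it instead while recovery is
 * in progress, when a checkpoint error has been detected, or when reclaim
 * targets a meta page that lies in front of the SSA area.
 */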
static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);

        trace_f2fs_writepage(page, META);

        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
        if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
                goto redirty_out;
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;

        f2fs_wait_on_page_writeback(page, META);
        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
        unlock_page(page);

        if (wbc->for_reclaim)
                f2fs_submit_merged_bio(sbi, META, WRITE);
        return 0;

redirty_out:
        redirty_page_for_writepage(wbc, page);
        return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;

        trace_f2fs_writepages(mapping->host, wbc, META);

        /* collect a number of dirty meta pages and write them together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;

        /* if mounting failed, skip writing meta pages */
        mutex_lock(&sbi->cp_mutex);
        diff = nr_pages_to_write(sbi, META, wbc);
        written = sync_meta_pages(sbi, META, wbc->nr_to_write);
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
        return 0;

skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
        return 0;
}

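/*
 * Walk the meta mapping for dirty pages, write up to @nr_to_write of them,
 * then submit the merged bio for @type.
 */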
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                                long nr_to_write)
{
        struct address_space *mapping = META_MAPPING(sbi);
        pgoff_t index = 0, end = LONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };

        pagevec_init(&pvec, 0);

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (unlikely(nr_pages == 0))
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }
                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        if (mapping->a_ops->writepage(page, &wbc)) {
                                unlock_page(page);
                                break;
                        }
                        nwritten++;
                        if (unlikely(nwritten >= nr_to_write))
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (nwritten)
                f2fs_submit_merged_bio(sbi, type, WRITE);

        return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
        trace_f2fs_set_page_dirty(page, META);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
                SetPagePrivate(page);
                f2fs_trace_pid(page);
                return 1;
        }
        return 0;
}

const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
};

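/*
 * Track an ino in the per-type radix tree and list; the entry is allocated
 * up front and freed again if the ino is already present.
 */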
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        struct inode_management *im = &sbi->im[type];
        struct ino_entry *e, *tmp;

        tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
        radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

        spin_lock(&im->ino_lock);
        e = radix_tree_lookup(&im->ino_root, ino);
        if (!e) {
                e = tmp;
                if (radix_tree_insert(&im->ino_root, ino, e)) {
                        spin_unlock(&im->ino_lock);
                        radix_tree_preload_end();
                        goto retry;
                }
                memset(e, 0, sizeof(struct ino_entry));
                e->ino = ino;

                list_add_tail(&e->list, &im->ino_list);
                if (type != ORPHAN_INO)
                        im->ino_num++;
        }
        spin_unlock(&im->ino_lock);
        radix_tree_preload_end();

        if (e != tmp)
                kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        struct inode_management *im = &sbi->im[type];
        struct ino_entry *e;

        spin_lock(&im->ino_lock);
        e = radix_tree_lookup(&im->ino_root, ino);
        if (e) {
                list_del(&e->list);
                radix_tree_delete(&im->ino_root, ino);
                im->ino_num--;
                spin_unlock(&im->ino_lock);
                kmem_cache_free(ino_entry_slab, e);
                return;
        }
        spin_unlock(&im->ino_lock);
}

void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        /* add new dirty ino entry into list */
        __add_ino_entry(sbi, ino, type);
}

void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        /* remove dirty ino entry from list */
        __remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
        struct inode_management *im = &sbi->im[mode];
        struct ino_entry *e;

        spin_lock(&im->ino_lock);
        e = radix_tree_lookup(&im->ino_root, ino);
        spin_unlock(&im->ino_lock);
        return e ? true : false;
}

void release_dirty_inode(struct f2fs_sb_info *sbi)
{
        struct ino_entry *e, *tmp;
        int i;

        for (i = APPEND_INO; i <= UPDATE_INO; i++) {
                struct inode_management *im = &sbi->im[i];

                spin_lock(&im->ino_lock);
                list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
                        list_del(&e->list);
                        radix_tree_delete(&im->ino_root, e->ino);
                        kmem_cache_free(ino_entry_slab, e);
                        im->ino_num--;
                }
                spin_unlock(&im->ino_lock);
        }
}

int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
        struct inode_management *im = &sbi->im[ORPHAN_INO];
        int err = 0;

        spin_lock(&im->ino_lock);
        if (unlikely(im->ino_num >= sbi->max_orphans))
                err = -ENOSPC;
        else
                im->ino_num++;
        spin_unlock(&im->ino_lock);

        return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
        struct inode_management *im = &sbi->im[ORPHAN_INO];

        spin_lock(&im->ino_lock);
        f2fs_bug_on(sbi, im->ino_num == 0);
        im->ino_num--;
        spin_unlock(&im->ino_lock);
}

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* add new orphan ino entry into list */
        __add_ino_entry(sbi, ino, ORPHAN_INO);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* remove orphan entry from orphan list */
        __remove_ino_entry(sbi, ino, ORPHAN_INO);
}

static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode;

        inode = f2fs_iget(sbi->sb, ino);
        if (IS_ERR(inode)) {
                /*
                 * It should be a bug if we cannot find the inode
                 * referenced by an orphan entry.
                 */
                f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
                return PTR_ERR(inode);
        }

        clear_nlink(inode);

        /* truncate all the data during iput */
        iput(inode);
        return 0;
}

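/*
 * Read the orphan blocks recorded in the current CP pack and drop every
 * listed inode so that its data is truncated on the final iput.
 */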
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
        block_t start_blk, orphan_blocks, i, j;
        int err;

        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return 0;

        start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
        orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

        ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP);

        for (i = 0; i < orphan_blocks; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;

                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
                        err = recover_orphan_inode(sbi, ino);
                        if (err) {
                                f2fs_put_page(page, 1);
                                return err;
                        }
                }
                f2fs_put_page(page, 1);
        }
        /* clear orphan flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
        return 0;
}

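/*
 * Pack the in-memory orphan ino list into f2fs_orphan_block pages starting
 * at @start_blk, F2FS_ORPHANS_PER_BLOCK entries per block.
 */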
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
        struct list_head *head;
        struct f2fs_orphan_block *orphan_blk = NULL;
        unsigned int nentries = 0;
        unsigned short index = 1;
        unsigned short orphan_blocks;
        struct page *page = NULL;
        struct ino_entry *orphan = NULL;
        struct inode_management *im = &sbi->im[ORPHAN_INO];

        orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

        /*
         * we don't need to do spin_lock(&im->ino_lock) here, since all the
         * orphan inode operations are covered under f2fs_lock_op().
         * And, spin_lock should be avoided due to page operations below.
         */
        head = &im->ino_list;

        /* loop over each orphan inode entry and write it to the journal block */
        list_for_each_entry(orphan, head, list) {
                if (!page) {
                        page = grab_meta_page(sbi, start_blk++);
                        orphan_blk =
                                (struct f2fs_orphan_block *)page_address(page);
                        memset(orphan_blk, 0, sizeof(*orphan_blk));
                }

                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
                         * once an orphan block is full of 1020 entries,
                         * we need to flush the current orphan block
                         * and bring another one into memory
                         */
                        orphan_blk->blk_addr = cpu_to_le16(index);
                        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                        orphan_blk->entry_count = cpu_to_le32(nentries);
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
                        nentries = 0;
                        page = NULL;
                }
        }

        if (page) {
                orphan_blk->blk_addr = cpu_to_le16(index);
                orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                orphan_blk->entry_count = cpu_to_le32(nentries);
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }
}

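/*
 * Read the first and last blocks of one CP pack, verify both CRCs and make
 * sure the two version numbers match; return the first page on success.
 */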
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
{
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
        size_t crc_offset;
        __u32 crc = 0;

        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);

        /* get the version number */
        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp1;

        crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;

        pre_version = cur_cp_version(cp_block);

        /* Read the 2nd cp block in this CP pack */
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        cp_page_2 = get_meta_page(sbi, cp_addr);

        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp2;

        crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;

        cur_version = cur_cp_version(cp_block);

        if (cur_version == pre_version) {
                *version = cur_version;
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
invalid_cp1:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
}

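/*
 * Load the newer of the two CP packs into sbi->ckpt, including any extra
 * cp payload blocks that follow the checkpoint header.
 */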
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *cp_block;
        struct f2fs_super_block *fsb = sbi->raw_super;
        struct page *cp1, *cp2, *cur_page;
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;
        unsigned int cp_blks = 1 + __cp_payload(sbi);
        block_t cp_blk_no;
        int i;

        sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
         * Finding a valid cp block involves reading both
         * sets (cp pack 1 and cp pack 2)
         */
        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* The second checkpoint pack should start at the next segment */
        cp_start_blk_no += ((unsigned long long)1) <<
                                le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2) {
                if (ver_after(cp2_version, cp1_version))
                        cur_page = cp2;
                else
                        cur_page = cp1;
        } else if (cp1) {
                cur_page = cp1;
        } else if (cp2) {
                cur_page = cp2;
        } else {
                goto fail_no_cp;
        }

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        if (cp_blks <= 1)
                goto done;

        cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        if (cur_page == cp2)
                cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

        for (i = 1; i < cp_blks; i++) {
                void *sit_bitmap_ptr;
                unsigned char *ckpt = (unsigned char *)sbi->ckpt;

                cur_page = get_meta_page(sbi, cp_blk_no + i);
                sit_bitmap_ptr = page_address(cur_page);
                memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
                f2fs_put_page(cur_page, 1);
        }
done:
        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;

fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
}

static int __add_dirty_inode(struct inode *inode, struct inode_entry *new)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
                return -EEXIST;

        set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
        F2FS_I(inode)->dirty_dir = new;
        list_add_tail(&new->list, &sbi->dir_inode_list);
        stat_inc_dirty_dir(sbi);
        return 0;
}

void update_dirty_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct inode_entry *new;
        int ret = 0;

        if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
                        !S_ISLNK(inode->i_mode))
                return;

        if (!S_ISDIR(inode->i_mode)) {
                inode_inc_dirty_pages(inode);
                goto out;
        }

        new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        ret = __add_dirty_inode(inode, new);
        inode_inc_dirty_pages(inode);
        spin_unlock(&sbi->dir_inode_lock);

        if (ret)
                kmem_cache_free(inode_entry_slab, new);
out:
        SetPagePrivate(page);
        f2fs_trace_pid(page);
}

void add_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct inode_entry *new =
                        f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        int ret = 0;

        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        ret = __add_dirty_inode(inode, new);
        spin_unlock(&sbi->dir_inode_lock);

        if (ret)
                kmem_cache_free(inode_entry_slab, new);
}

void remove_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct inode_entry *entry;

        if (!S_ISDIR(inode->i_mode))
                return;

        spin_lock(&sbi->dir_inode_lock);
        if (get_dirty_pages(inode) ||
                        !is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }

        entry = F2FS_I(inode)->dirty_dir;
        list_del(&entry->list);
        F2FS_I(inode)->dirty_dir = NULL;
        clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
        stat_dec_dirty_dir(sbi);
        spin_unlock(&sbi->dir_inode_lock);
        kmem_cache_free(inode_entry_slab, entry);

        /* Only from the recovery routine */
        if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
                clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
                iput(inode);
        }
}

void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
        struct list_head *head;
        struct inode_entry *entry;
        struct inode *inode;
retry:
        if (unlikely(f2fs_cp_error(sbi)))
                return;

        spin_lock(&sbi->dir_inode_lock);

        head = &sbi->dir_inode_list;
        if (list_empty(head)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
        entry = list_entry(head->next, struct inode_entry, list);
        inode = igrab(entry->inode);
        spin_unlock(&sbi->dir_inode_lock);
        if (inode) {
                filemap_fdatawrite(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit the bio, since there may be several
                 * dentry pages still under writeback in the inode being freed.
                 */
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
                cond_resched();
        }
        goto retry;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        struct blk_plug plug;
        int err = 0;

        blk_start_plug(&plug);

retry_flush_dents:
        f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                f2fs_unlock_all(sbi);
                sync_dirty_dir_inodes(sbi);
                if (unlikely(f2fs_cp_error(sbi))) {
                        err = -EIO;
                        goto out;
                }
                goto retry_flush_dents;
        }

        /*
         * POR: we should ensure that there are no dirty node pages
         * until finishing nat/sit flush.
         */
retry_flush_nodes:
        down_write(&sbi->node_write);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
                sync_node_pages(sbi, 0, &wbc);
                if (unlikely(f2fs_cp_error(sbi))) {
                        f2fs_unlock_all(sbi);
                        err = -EIO;
                        goto out;
                }
                goto retry_flush_nodes;
        }
out:
        blk_finish_plug(&plug);
        return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
        up_write(&sbi->node_write);
        f2fs_unlock_all(sbi);
}

static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

                if (!get_pages(sbi, F2FS_WRITEBACK))
                        break;

                io_schedule();
        }
        finish_wait(&sbi->cp_wait, &wait);
}

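/*
 * Write one complete checkpoint pack: flush dirty meta pages, fill in the
 * checkpoint header, then emit the header, orphan blocks, data/node summaries
 * and the trailing checkpoint block, and wait for them to reach the disk.
 */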
static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
        nid_t last_nid = nm_i->next_scan_nid;
        block_t start_blk;
        unsigned int data_sum_blocks, orphan_blocks;
        __u32 crc32 = 0;
        int i;
        int cp_payload_blks = __cp_payload(sbi);
        block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
        bool invalidate = false;

        /*
         * This avoids conducting wrong roll-forward operations and uses
         * meta pages, so it should be called prior to sync_meta_pages below.
         */
        if (discard_next_dnode(sbi, discard_blk))
                invalidate = true;

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META)) {
                sync_meta_pages(sbi, META, LONG_MAX);
                if (unlikely(f2fs_cp_error(sbi)))
                        return;
        }

        next_free_nid(sbi, &last_nid);

        /*
         * modify checkpoint
         * version number is already updated
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

        ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
        ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
        ckpt->next_free_nid = cpu_to_le32(last_nid);

        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi, false);
        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

        orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
                        orphan_blocks);

        if (__remain_node_summaries(cpc->reason))
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks + NR_CURSEG_NODE_TYPE);
        else
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks);

        if (cpc->reason == CP_UMOUNT)
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

        if (cpc->reason == CP_FASTBOOT)
                set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

        if (orphan_num)
                set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
                set_ckpt_flags(ckpt, CP_FSCK_FLAG);

        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
        *((__le32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_addr(sbi);

        /* write out checkpoint buffer at block 0 */
        update_meta_page(sbi, ckpt, start_blk++);

        for (i = 1; i < 1 + cp_payload_blks; i++)
                update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
                                                        start_blk++);

        if (orphan_num) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
        if (__remain_node_summaries(cpc->reason)) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* write out checkpoint block */
        update_meta_page(sbi, ckpt, start_blk);

        /* wait for previously submitted node/meta pages writeback */
        wait_on_all_pages_writeback(sbi);

        if (unlikely(f2fs_cp_error(sbi)))
                return;

        filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
        filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;

        /* Here, we only have one bio having CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

        /* wait for previously submitted meta pages writeback */
        wait_on_all_pages_writeback(sbi);

        /*
         * invalidate meta page which is used temporarily for zeroing out
         * block at the end of warm node chain.
         */
        if (invalidate)
                invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
                                                                discard_blk);

        release_dirty_inode(sbi);

        if (unlikely(f2fs_cp_error(sbi)))
                return;

        clear_prefree_segments(sbi, cpc);
        clear_sbi_flag(sbi, SBI_IS_DIRTY);
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;

        mutex_lock(&sbi->cp_mutex);

        if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
                (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
                (cpc->reason == CP_DISCARD && !sbi->discard_blks)))
                goto out;
        if (unlikely(f2fs_cp_error(sbi)))
                goto out;
        if (f2fs_readonly(sbi->sb))
                goto out;

        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

        if (block_operations(sbi))
                goto out;

        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

        f2fs_submit_merged_bio(sbi, DATA, WRITE);
        f2fs_submit_merged_bio(sbi, NODE, WRITE);
        f2fs_submit_merged_bio(sbi, META, WRITE);

        /*
         * update checkpoint pack index
         * Increase the version number so that
         * SIT entries and seg summaries are written to the correct place
         */
        ckpt_ver = cur_cp_version(ckpt);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to NAT/SIT area */
        flush_nat_entries(sbi);
        flush_sit_entries(sbi, cpc);

        /* write out the checkpoint pack */
        do_checkpoint(sbi, cpc);

        unblock_operations(sbi);
        stat_inc_cp_count(sbi->stat_info);

        if (cpc->reason == CP_RECOVERY)
                f2fs_msg(sbi->sb, KERN_NOTICE,
                        "checkpoint: version = %llx", ckpt_ver);
out:
        mutex_unlock(&sbi->cp_mutex);
        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
}

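/*
 * Initialize the per-type ino management structures and derive how many
 * orphan inodes a checkpoint pack can hold.
 */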
void init_ino_entry_info(struct f2fs_sb_info *sbi)
{
        int i;

        for (i = 0; i < MAX_INO_ENTRY; i++) {
                struct inode_management *im = &sbi->im[i];

                INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
                spin_lock_init(&im->ino_lock);
                INIT_LIST_HEAD(&im->ino_list);
                im->ino_num = 0;
        }

        sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
                        NR_CURSEG_TYPE - __cp_payload(sbi)) *
                                F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
        ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
                        sizeof(struct ino_entry));
        if (!ino_entry_slab)
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
                        sizeof(struct inode_entry));
        if (!inode_entry_slab) {
                kmem_cache_destroy(ino_entry_slab);
                return -ENOMEM;
        }
        return 0;
}

void destroy_checkpoint_caches(void)
{
        kmem_cache_destroy(ino_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
}