linux/fs/f2fs/segment.c
   1/*
   2 * fs/f2fs/segment.c
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#include <linux/fs.h>
  12#include <linux/f2fs_fs.h>
  13#include <linux/bio.h>
  14#include <linux/blkdev.h>
  15#include <linux/prefetch.h>
  16#include <linux/kthread.h>
  17#include <linux/swap.h>
  18#include <linux/timer.h>
  19#include <linux/freezer.h>
  20#include <linux/sched/signal.h>
  21
  22#include "f2fs.h"
  23#include "segment.h"
  24#include "node.h"
  25#include "gc.h"
  26#include "trace.h"
  27#include <trace/events/f2fs.h>
  28
  29#define __reverse_ffz(x) __reverse_ffs(~(x))
  30
  31static struct kmem_cache *discard_entry_slab;
  32static struct kmem_cache *discard_cmd_slab;
  33static struct kmem_cache *sit_entry_set_slab;
  34static struct kmem_cache *inmem_entry_slab;
  35
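    /*
     * Pack the bitmap bytes into an unsigned long with the first byte in the
     * most significant position, so the __reverse_* helpers below can scan
     * f2fs bitmaps (which fill each byte MSB-first) with word operations.
     */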
  36static unsigned long __reverse_ulong(unsigned char *str)
  37{
  38        unsigned long tmp = 0;
  39        int shift = 24, idx = 0;
  40
  41#if BITS_PER_LONG == 64
  42        shift = 56;
  43#endif
  44        while (shift >= 0) {
  45                tmp |= (unsigned long)str[idx++] << shift;
  46                shift -= BITS_PER_BYTE;
  47        }
  48        return tmp;
  49}
  50
  51/*
  52 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
  53 * MSB and LSB are reversed in a byte by f2fs_set_bit.
  54 */
  55static inline unsigned long __reverse_ffs(unsigned long word)
  56{
  57        int num = 0;
  58
  59#if BITS_PER_LONG == 64
  60        if ((word & 0xffffffff00000000UL) == 0)
  61                num += 32;
  62        else
  63                word >>= 32;
  64#endif
  65        if ((word & 0xffff0000) == 0)
  66                num += 16;
  67        else
  68                word >>= 16;
  69
  70        if ((word & 0xff00) == 0)
  71                num += 8;
  72        else
  73                word >>= 8;
  74
  75        if ((word & 0xf0) == 0)
  76                num += 4;
  77        else
  78                word >>= 4;
  79
  80        if ((word & 0xc) == 0)
  81                num += 2;
  82        else
  83                word >>= 2;
  84
  85        if ((word & 0x2) == 0)
  86                num += 1;
  87        return num;
  88}
  89
  90/*
  91 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
  92 * f2fs_set_bit makes MSB and LSB reversed in a byte.
  93 * @size must be an integral multiple of the unsigned long size.
  94 * Example:
  95 *                             MSB <--> LSB
  96 *   f2fs_set_bit(0, bitmap) => 1000 0000
  97 *   f2fs_set_bit(7, bitmap) => 0000 0001
  98 */
  99static unsigned long __find_rev_next_bit(const unsigned long *addr,
 100                        unsigned long size, unsigned long offset)
 101{
 102        const unsigned long *p = addr + BIT_WORD(offset);
 103        unsigned long result = size;
 104        unsigned long tmp;
 105
 106        if (offset >= size)
 107                return size;
 108
 109        size -= (offset & ~(BITS_PER_LONG - 1));
 110        offset %= BITS_PER_LONG;
 111
 112        while (1) {
 113                if (*p == 0)
 114                        goto pass;
 115
 116                tmp = __reverse_ulong((unsigned char *)p);
 117
 118                tmp &= ~0UL >> offset;
 119                if (size < BITS_PER_LONG)
 120                        tmp &= (~0UL << (BITS_PER_LONG - size));
 121                if (tmp)
 122                        goto found;
 123pass:
 124                if (size <= BITS_PER_LONG)
 125                        break;
 126                size -= BITS_PER_LONG;
 127                offset = 0;
 128                p++;
 129        }
 130        return result;
 131found:
 132        return result - size + __reverse_ffs(tmp);
 133}
 134
 135static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
 136                        unsigned long size, unsigned long offset)
 137{
 138        const unsigned long *p = addr + BIT_WORD(offset);
 139        unsigned long result = size;
 140        unsigned long tmp;
 141
 142        if (offset >= size)
 143                return size;
 144
 145        size -= (offset & ~(BITS_PER_LONG - 1));
 146        offset %= BITS_PER_LONG;
 147
 148        while (1) {
 149                if (*p == ~0UL)
 150                        goto pass;
 151
 152                tmp = __reverse_ulong((unsigned char *)p);
 153
 154                if (offset)
 155                        tmp |= ~0UL << (BITS_PER_LONG - offset);
 156                if (size < BITS_PER_LONG)
 157                        tmp |= ~0UL >> size;
 158                if (tmp != ~0UL)
 159                        goto found;
 160pass:
 161                if (size <= BITS_PER_LONG)
 162                        break;
 163                size -= BITS_PER_LONG;
 164                offset = 0;
 165                p++;
 166        }
 167        return result;
 168found:
 169        return result - size + __reverse_ffz(tmp);
 170}
 171
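    /*
     * Decide whether SSR (slack space recycling) allocation should be used:
     * never in LFS mode, always under urgent GC, and otherwise only when free
     * sections run low against dirty node/dentry/imeta sections plus the
     * reserved sections.
     */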
 172bool need_SSR(struct f2fs_sb_info *sbi)
 173{
 174        int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 175        int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 176        int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
 177
 178        if (test_opt(sbi, LFS))
 179                return false;
 180        if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
 181                return true;
 182
 183        return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
 184                                                2 * reserved_sections(sbi));
 185}
 186
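    /*
     * Register @page as an atomic-write page: tag it ATOMIC_WRITTEN_PAGE, pin
     * it with an extra reference and link it into the inode's inmem_pages
     * list until the atomic pages are committed or dropped.
     */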
 187void register_inmem_page(struct inode *inode, struct page *page)
 188{
 189        struct f2fs_inode_info *fi = F2FS_I(inode);
 190        struct inmem_pages *new;
 191
 192        f2fs_trace_pid(page);
 193
 194        set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
 195        SetPagePrivate(page);
 196
 197        new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 198
 199        /* add the atomic page to the inmem list */
 200        new->page = page;
 201        INIT_LIST_HEAD(&new->list);
 202
 203        /* increase reference count with clean state */
 204        mutex_lock(&fi->inmem_lock);
 205        get_page(page);
 206        list_add_tail(&new->list, &fi->inmem_pages);
 207        inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
 208        mutex_unlock(&fi->inmem_lock);
 209
 210        trace_f2fs_register_inmem_page(page, INMEM);
 211}
 212
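    /*
     * Release every in-memory page on @head.  With @drop the pages are simply
     * discarded; with @recover each page's on-disk block is rolled back via
     * f2fs_replace_block() to the old address recorded at commit time.
     */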
 213static int __revoke_inmem_pages(struct inode *inode,
 214                                struct list_head *head, bool drop, bool recover)
 215{
 216        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 217        struct inmem_pages *cur, *tmp;
 218        int err = 0;
 219
 220        list_for_each_entry_safe(cur, tmp, head, list) {
 221                struct page *page = cur->page;
 222
 223                if (drop)
 224                        trace_f2fs_commit_inmem_page(page, INMEM_DROP);
 225
 226                lock_page(page);
 227
 228                if (recover) {
 229                        struct dnode_of_data dn;
 230                        struct node_info ni;
 231
 232                        trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
 233retry:
 234                        set_new_dnode(&dn, inode, NULL, NULL, 0);
 235                        err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 236                        if (err) {
 237                                if (err == -ENOMEM) {
 238                                        congestion_wait(BLK_RW_ASYNC, HZ/50);
 239                                        cond_resched();
 240                                        goto retry;
 241                                }
 242                                err = -EAGAIN;
 243                                goto next;
 244                        }
 245                        get_node_info(sbi, dn.nid, &ni);
 246                        f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
 247                                        cur->old_addr, ni.version, true, true);
 248                        f2fs_put_dnode(&dn);
 249                }
 250next:
 251        /* we don't need to invalidate this in the successful case */
 252                if (drop || recover)
 253                        ClearPageUptodate(page);
 254                set_page_private(page, 0);
 255                ClearPagePrivate(page);
 256                f2fs_put_page(page, 1);
 257
 258                list_del(&cur->list);
 259                kmem_cache_free(inmem_entry_slab, cur);
 260                dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
 261        }
 262        return err;
 263}
 264
 265void drop_inmem_pages(struct inode *inode)
 266{
 267        struct f2fs_inode_info *fi = F2FS_I(inode);
 268
 269        mutex_lock(&fi->inmem_lock);
 270        __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
 271        mutex_unlock(&fi->inmem_lock);
 272
 273        clear_inode_flag(inode, FI_ATOMIC_FILE);
 274        clear_inode_flag(inode, FI_HOT_DATA);
 275        stat_dec_atomic_write(inode);
 276}
 277
 278void drop_inmem_page(struct inode *inode, struct page *page)
 279{
 280        struct f2fs_inode_info *fi = F2FS_I(inode);
 281        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 282        struct list_head *head = &fi->inmem_pages;
 283        struct inmem_pages *cur = NULL;
 284
 285        f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
 286
 287        mutex_lock(&fi->inmem_lock);
 288        list_for_each_entry(cur, head, list) {
 289                if (cur->page == page)
 290                        break;
 291        }
 292
 293        f2fs_bug_on(sbi, !cur || cur->page != page);
 294        list_del(&cur->list);
 295        mutex_unlock(&fi->inmem_lock);
 296
 297        dec_page_count(sbi, F2FS_INMEM_PAGES);
 298        kmem_cache_free(inmem_entry_slab, cur);
 299
 300        ClearPageUptodate(page);
 301        set_page_private(page, 0);
 302        ClearPagePrivate(page);
 303        f2fs_put_page(page, 0);
 304
 305        trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
 306}
 307
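    /*
     * Write back all registered atomic pages with do_write_data_page(),
     * recording each page's old block address so a failed commit can be
     * revoked.  Pages are moved to @revoke_list as they are processed.
     */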
 308static int __commit_inmem_pages(struct inode *inode,
 309                                        struct list_head *revoke_list)
 310{
 311        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 312        struct f2fs_inode_info *fi = F2FS_I(inode);
 313        struct inmem_pages *cur, *tmp;
 314        struct f2fs_io_info fio = {
 315                .sbi = sbi,
 316                .type = DATA,
 317                .op = REQ_OP_WRITE,
 318                .op_flags = REQ_SYNC | REQ_PRIO,
 319                .io_type = FS_DATA_IO,
 320        };
 321        pgoff_t last_idx = ULONG_MAX;
 322        int err = 0;
 323
 324        list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
 325                struct page *page = cur->page;
 326
 327                lock_page(page);
 328                if (page->mapping == inode->i_mapping) {
 329                        trace_f2fs_commit_inmem_page(page, INMEM);
 330
 331                        set_page_dirty(page);
 332                        f2fs_wait_on_page_writeback(page, DATA, true);
 333                        if (clear_page_dirty_for_io(page)) {
 334                                inode_dec_dirty_pages(inode);
 335                                remove_dirty_inode(inode);
 336                        }
 337retry:
 338                        fio.page = page;
 339                        fio.old_blkaddr = NULL_ADDR;
 340                        fio.encrypted_page = NULL;
 341                        fio.need_lock = LOCK_DONE;
 342                        err = do_write_data_page(&fio);
 343                        if (err) {
 344                                if (err == -ENOMEM) {
 345                                        congestion_wait(BLK_RW_ASYNC, HZ/50);
 346                                        cond_resched();
 347                                        goto retry;
 348                                }
 349                                unlock_page(page);
 350                                break;
 351                        }
 352                        /* record old blkaddr for revoking */
 353                        cur->old_addr = fio.old_blkaddr;
 354                        last_idx = page->index;
 355                }
 356                unlock_page(page);
 357                list_move_tail(&cur->list, revoke_list);
 358        }
 359
 360        if (last_idx != ULONG_MAX)
 361                f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
 362
 363        if (!err)
 364                __revoke_inmem_pages(inode, revoke_list, false, false);
 365
 366        return err;
 367}
 368
 369int commit_inmem_pages(struct inode *inode)
 370{
 371        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 372        struct f2fs_inode_info *fi = F2FS_I(inode);
 373        struct list_head revoke_list;
 374        int err;
 375
 376        INIT_LIST_HEAD(&revoke_list);
 377        f2fs_balance_fs(sbi, true);
 378        f2fs_lock_op(sbi);
 379
 380        set_inode_flag(inode, FI_ATOMIC_COMMIT);
 381
 382        mutex_lock(&fi->inmem_lock);
 383        err = __commit_inmem_pages(inode, &revoke_list);
 384        if (err) {
 385                int ret;
 386                /*
 387                 * Try to revoke all committed pages, but this can still fail
 388                 * due to lack of memory or some other reason.  If that happens,
 389                 * EAGAIN is returned, meaning the transaction's integrity is
 390                 * already lost; the caller should use a journal to recover, or
 391                 * rewrite and commit the last transaction.  For any other error,
 392                 * the revoke was completed by the filesystem itself.
 393                 */
 394                ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
 395                if (ret)
 396                        err = ret;
 397
 398                /* drop all uncommitted pages */
 399                __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
 400        }
 401        mutex_unlock(&fi->inmem_lock);
 402
 403        clear_inode_flag(inode, FI_ATOMIC_COMMIT);
 404
 405        f2fs_unlock_op(sbi);
 406        return err;
 407}
 408
 409/*
 410 * This function balances dirty node and dentry pages.
 411 * In addition, it controls garbage collection.
 412 */
 413void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 414{
 415#ifdef CONFIG_F2FS_FAULT_INJECTION
 416        if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 417                f2fs_show_injection_info(FAULT_CHECKPOINT);
 418                f2fs_stop_checkpoint(sbi, false);
 419        }
 420#endif
 421
 422        /* balance_fs_bg() can be left pending */
 423        if (need && excess_cached_nats(sbi))
 424                f2fs_balance_fs_bg(sbi);
 425
 426        /*
 427         * We should do GC, or end up with a checkpoint, if there are too many
 428         * dirty dir/node pages and not enough free segments.
 429         */
 430        if (has_not_enough_free_secs(sbi, 0, 0)) {
 431                mutex_lock(&sbi->gc_mutex);
 432                f2fs_gc(sbi, false, false, NULL_SEGNO);
 433        }
 434}
 435
 436void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 437{
 438        /* try to shrink extent cache when there is not enough memory */
 439        if (!available_free_memory(sbi, EXTENT_CACHE))
 440                f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 441
 442        /* check the # of cached NAT entries */
 443        if (!available_free_memory(sbi, NAT_ENTRIES))
 444                try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 445
 446        if (!available_free_memory(sbi, FREE_NIDS))
 447                try_to_free_nids(sbi, MAX_FREE_NIDS);
 448        else
 449                build_free_nids(sbi, false, false);
 450
 451        if (!is_idle(sbi) && !excess_dirty_nats(sbi))
 452                return;
 453
 454        /* checkpoint is the only way to shrink partial cached entries */
 455        if (!available_free_memory(sbi, NAT_ENTRIES) ||
 456                        !available_free_memory(sbi, INO_ENTRIES) ||
 457                        excess_prefree_segs(sbi) ||
 458                        excess_dirty_nats(sbi) ||
 459                        f2fs_time_over(sbi, CP_TIME)) {
 460                if (test_opt(sbi, DATA_FLUSH)) {
 461                        struct blk_plug plug;
 462
 463                        blk_start_plug(&plug);
 464                        sync_dirty_inodes(sbi, FILE_INODE);
 465                        blk_finish_plug(&plug);
 466                }
 467                f2fs_sync_fs(sbi->sb, true);
 468                stat_inc_bg_cp_count(sbi->stat_info);
 469        }
 470}
 471
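    /* Issue an empty REQ_PREFLUSH bio to @bdev and wait for it to complete. */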
 472static int __submit_flush_wait(struct f2fs_sb_info *sbi,
 473                                struct block_device *bdev)
 474{
 475        struct bio *bio = f2fs_bio_alloc(0);
 476        int ret;
 477
 478        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
 479        bio_set_dev(bio, bdev);
 480        ret = submit_bio_wait(bio);
 481        bio_put(bio);
 482
 483        trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
 484                                test_opt(sbi, FLUSH_MERGE), ret);
 485        return ret;
 486}
 487
 488static int submit_flush_wait(struct f2fs_sb_info *sbi)
 489{
 490        int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
 491        int i;
 492
 493        if (!sbi->s_ndevs || ret)
 494                return ret;
 495
 496        for (i = 1; i < sbi->s_ndevs; i++) {
 497                ret = __submit_flush_wait(sbi, FDEV(i).bdev);
 498                if (ret)
 499                        break;
 500        }
 501        return ret;
 502}
 503
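    /*
     * Flush-merge worker: drain the issue_list, submit one flush on behalf of
     * all queued waiters and complete each of them with the shared result.
     */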
 504static int issue_flush_thread(void *data)
 505{
 506        struct f2fs_sb_info *sbi = data;
 507        struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
 508        wait_queue_head_t *q = &fcc->flush_wait_queue;
 509repeat:
 510        if (kthread_should_stop())
 511                return 0;
 512
 513        sb_start_intwrite(sbi->sb);
 514
 515        if (!llist_empty(&fcc->issue_list)) {
 516                struct flush_cmd *cmd, *next;
 517                int ret;
 518
 519                fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 520                fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 521
 522                ret = submit_flush_wait(sbi);
 523                atomic_inc(&fcc->issued_flush);
 524
 525                llist_for_each_entry_safe(cmd, next,
 526                                          fcc->dispatch_list, llnode) {
 527                        cmd->ret = ret;
 528                        complete(&cmd->wait);
 529                }
 530                fcc->dispatch_list = NULL;
 531        }
 532
 533        sb_end_intwrite(sbi->sb);
 534
 535        wait_event_interruptible(*q,
 536                kthread_should_stop() || !llist_empty(&fcc->issue_list));
 537        goto repeat;
 538}
 539
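    /*
     * Issue a cache flush for the filesystem.  Without FLUSH_MERGE (or when
     * no other flush is in flight) the flush is submitted directly; otherwise
     * the caller queues a flush_cmd and either the flush thread or, if it has
     * exited, one of the queued callers issues a single flush for all waiters.
     */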
 540int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 541{
 542        struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
 543        struct flush_cmd cmd;
 544        int ret;
 545
 546        if (test_opt(sbi, NOBARRIER))
 547                return 0;
 548
 549        if (!test_opt(sbi, FLUSH_MERGE)) {
 550                ret = submit_flush_wait(sbi);
 551                atomic_inc(&fcc->issued_flush);
 552                return ret;
 553        }
 554
 555        if (atomic_inc_return(&fcc->issing_flush) == 1) {
 556                ret = submit_flush_wait(sbi);
 557                atomic_dec(&fcc->issing_flush);
 558
 559                atomic_inc(&fcc->issued_flush);
 560                return ret;
 561        }
 562
 563        init_completion(&cmd.wait);
 564
 565        llist_add(&cmd.llnode, &fcc->issue_list);
 566
 567        /* update issue_list before we wake up issue_flush thread */
 568        smp_mb();
 569
 570        if (waitqueue_active(&fcc->flush_wait_queue))
 571                wake_up(&fcc->flush_wait_queue);
 572
 573        if (fcc->f2fs_issue_flush) {
 574                wait_for_completion(&cmd.wait);
 575                atomic_dec(&fcc->issing_flush);
 576        } else {
 577                struct llist_node *list;
 578
 579                list = llist_del_all(&fcc->issue_list);
 580                if (!list) {
 581                        wait_for_completion(&cmd.wait);
 582                        atomic_dec(&fcc->issing_flush);
 583                } else {
 584                        struct flush_cmd *tmp, *next;
 585
 586                        ret = submit_flush_wait(sbi);
 587
 588                        llist_for_each_entry_safe(tmp, next, list, llnode) {
 589                                if (tmp == &cmd) {
 590                                        cmd.ret = ret;
 591                                        atomic_dec(&fcc->issing_flush);
 592                                        continue;
 593                                }
 594                                tmp->ret = ret;
 595                                complete(&tmp->wait);
 596                        }
 597                }
 598        }
 599
 600        return cmd.ret;
 601}
 602
 603int create_flush_cmd_control(struct f2fs_sb_info *sbi)
 604{
 605        dev_t dev = sbi->sb->s_bdev->bd_dev;
 606        struct flush_cmd_control *fcc;
 607        int err = 0;
 608
 609        if (SM_I(sbi)->fcc_info) {
 610                fcc = SM_I(sbi)->fcc_info;
 611                if (fcc->f2fs_issue_flush)
 612                        return err;
 613                goto init_thread;
 614        }
 615
 616        fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
 617        if (!fcc)
 618                return -ENOMEM;
 619        atomic_set(&fcc->issued_flush, 0);
 620        atomic_set(&fcc->issing_flush, 0);
 621        init_waitqueue_head(&fcc->flush_wait_queue);
 622        init_llist_head(&fcc->issue_list);
 623        SM_I(sbi)->fcc_info = fcc;
 624        if (!test_opt(sbi, FLUSH_MERGE))
 625                return err;
 626
 627init_thread:
 628        fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
 629                                "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
 630        if (IS_ERR(fcc->f2fs_issue_flush)) {
 631                err = PTR_ERR(fcc->f2fs_issue_flush);
 632                kfree(fcc);
 633                SM_I(sbi)->fcc_info = NULL;
 634                return err;
 635        }
 636
 637        return err;
 638}
 639
 640void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
 641{
 642        struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
 643
 644        if (fcc && fcc->f2fs_issue_flush) {
 645                struct task_struct *flush_thread = fcc->f2fs_issue_flush;
 646
 647                fcc->f2fs_issue_flush = NULL;
 648                kthread_stop(flush_thread);
 649        }
 650        if (free) {
 651                kfree(fcc);
 652                SM_I(sbi)->fcc_info = NULL;
 653        }
 654}
 655
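    /*
     * Mark @segno dirty in the bitmap for @dirty_type (and, for DIRTY, also
     * in the bitmap of the segment's own type), keeping the nr_dirty counters
     * in sync.  Current working segments are never added.
     */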
 656static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
 657                enum dirty_type dirty_type)
 658{
 659        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 660
 661        /* need not be added */
 662        if (IS_CURSEG(sbi, segno))
 663                return;
 664
 665        if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
 666                dirty_i->nr_dirty[dirty_type]++;
 667
 668        if (dirty_type == DIRTY) {
 669                struct seg_entry *sentry = get_seg_entry(sbi, segno);
 670                enum dirty_type t = sentry->type;
 671
 672                if (unlikely(t >= DIRTY)) {
 673                        f2fs_bug_on(sbi, 1);
 674                        return;
 675                }
 676                if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
 677                        dirty_i->nr_dirty[t]++;
 678        }
 679}
 680
 681static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
 682                enum dirty_type dirty_type)
 683{
 684        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 685
 686        if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
 687                dirty_i->nr_dirty[dirty_type]--;
 688
 689        if (dirty_type == DIRTY) {
 690                struct seg_entry *sentry = get_seg_entry(sbi, segno);
 691                enum dirty_type t = sentry->type;
 692
 693                if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
 694                        dirty_i->nr_dirty[t]--;
 695
 696                if (get_valid_blocks(sbi, segno, true) == 0)
 697                        clear_bit(GET_SEC_FROM_SEG(sbi, segno),
 698                                                dirty_i->victim_secmap);
 699        }
 700}
 701
 702/*
 703 * No error such as -ENOMEM should occur here.
 704 * Adding a dirty entry to the seglist is not a critical operation.
 705 * If a given segment is one of current working segments, it won't be added.
 706 */
 707static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 708{
 709        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 710        unsigned short valid_blocks;
 711
 712        if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
 713                return;
 714
 715        mutex_lock(&dirty_i->seglist_lock);
 716
 717        valid_blocks = get_valid_blocks(sbi, segno, false);
 718
 719        if (valid_blocks == 0) {
 720                __locate_dirty_segment(sbi, segno, PRE);
 721                __remove_dirty_segment(sbi, segno, DIRTY);
 722        } else if (valid_blocks < sbi->blocks_per_seg) {
 723                __locate_dirty_segment(sbi, segno, DIRTY);
 724        } else {
 725                /* Recovery routine with SSR needs this */
 726                __remove_dirty_segment(sbi, segno, DIRTY);
 727        }
 728
 729        mutex_unlock(&dirty_i->seglist_lock);
 730}
 731
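    /*
     * Allocate and initialize a discard command for @len blocks starting at
     * @start on @bdev (logical start @lstart) and queue it on the pending
     * list selected by plist_idx(@len).
     */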
 732static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
 733                struct block_device *bdev, block_t lstart,
 734                block_t start, block_t len)
 735{
 736        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 737        struct list_head *pend_list;
 738        struct discard_cmd *dc;
 739
 740        f2fs_bug_on(sbi, !len);
 741
 742        pend_list = &dcc->pend_list[plist_idx(len)];
 743
 744        dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
 745        INIT_LIST_HEAD(&dc->list);
 746        dc->bdev = bdev;
 747        dc->lstart = lstart;
 748        dc->start = start;
 749        dc->len = len;
 750        dc->ref = 0;
 751        dc->state = D_PREP;
 752        dc->error = 0;
 753        init_completion(&dc->wait);
 754        list_add_tail(&dc->list, pend_list);
 755        atomic_inc(&dcc->discard_cmd_cnt);
 756        dcc->undiscard_blks += len;
 757
 758        return dc;
 759}
 760
 761static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
 762                                struct block_device *bdev, block_t lstart,
 763                                block_t start, block_t len,
 764                                struct rb_node *parent, struct rb_node **p)
 765{
 766        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 767        struct discard_cmd *dc;
 768
 769        dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
 770
 771        rb_link_node(&dc->rb_node, parent, p);
 772        rb_insert_color(&dc->rb_node, &dcc->root);
 773
 774        return dc;
 775}
 776
 777static void __detach_discard_cmd(struct discard_cmd_control *dcc,
 778                                                        struct discard_cmd *dc)
 779{
 780        if (dc->state == D_DONE)
 781                atomic_dec(&dcc->issing_discard);
 782
 783        list_del(&dc->list);
 784        rb_erase(&dc->rb_node, &dcc->root);
 785        dcc->undiscard_blks -= dc->len;
 786
 787        kmem_cache_free(discard_cmd_slab, dc);
 788
 789        atomic_dec(&dcc->discard_cmd_cnt);
 790}
 791
 792static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
 793                                                        struct discard_cmd *dc)
 794{
 795        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 796
 797        f2fs_bug_on(sbi, dc->ref);
 798
 799        if (dc->error == -EOPNOTSUPP)
 800                dc->error = 0;
 801
 802        if (dc->error)
 803                f2fs_msg(sbi->sb, KERN_INFO,
 804                        "Issue discard(%u, %u, %u) failed, ret: %d",
 805                        dc->lstart, dc->start, dc->len, dc->error);
 806        __detach_discard_cmd(dcc, dc);
 807}
 808
 809static void f2fs_submit_discard_endio(struct bio *bio)
 810{
 811        struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
 812
 813        dc->error = blk_status_to_errno(bio->bi_status);
 814        dc->state = D_DONE;
 815        complete_all(&dc->wait);
 816        bio_put(bio);
 817}
 818
 819void __check_sit_bitmap(struct f2fs_sb_info *sbi,
 820                                block_t start, block_t end)
 821{
 822#ifdef CONFIG_F2FS_CHECK_FS
 823        struct seg_entry *sentry;
 824        unsigned int segno;
 825        block_t blk = start;
 826        unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
 827        unsigned long *map;
 828
 829        while (blk < end) {
 830                segno = GET_SEGNO(sbi, blk);
 831                sentry = get_seg_entry(sbi, segno);
 832                offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
 833
 834                if (end < START_BLOCK(sbi, segno + 1))
 835                        size = GET_BLKOFF_FROM_SEG0(sbi, end);
 836                else
 837                        size = max_blocks;
 838                map = (unsigned long *)(sentry->cur_valid_map);
 839                offset = __find_rev_next_bit(map, size, offset);
 840                f2fs_bug_on(sbi, offset != size);
 841                blk = START_BLOCK(sbi, segno + 1);
 842        }
 843#endif
 844}
 845
 846/* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
 847static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
 848                                struct discard_cmd *dc)
 849{
 850        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 851        struct bio *bio = NULL;
 852
 853        if (dc->state != D_PREP)
 854                return;
 855
 856        trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
 857
 858        dc->error = __blkdev_issue_discard(dc->bdev,
 859                                SECTOR_FROM_BLOCK(dc->start),
 860                                SECTOR_FROM_BLOCK(dc->len),
 861                                GFP_NOFS, 0, &bio);
 862        if (!dc->error) {
 863                /* set this before submission so an immediate D_DONE is not overwritten */
 864                dc->state = D_SUBMIT;
 865                atomic_inc(&dcc->issued_discard);
 866                atomic_inc(&dcc->issing_discard);
 867                if (bio) {
 868                        bio->bi_private = dc;
 869                        bio->bi_end_io = f2fs_submit_discard_endio;
 870                        bio->bi_opf |= REQ_SYNC;
 871                        submit_bio(bio);
 872                        list_move_tail(&dc->list, &dcc->wait_list);
 873                        __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
 874
 875                        f2fs_update_iostat(sbi, FS_DISCARD, 1);
 876                }
 877        } else {
 878                __remove_discard_cmd(sbi, dc);
 879        }
 880}
 881
 882static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
 883                                struct block_device *bdev, block_t lstart,
 884                                block_t start, block_t len,
 885                                struct rb_node **insert_p,
 886                                struct rb_node *insert_parent)
 887{
 888        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 889        struct rb_node **p = &dcc->root.rb_node;
 890        struct rb_node *parent = NULL;
 891        struct discard_cmd *dc = NULL;
 892
 893        if (insert_p && insert_parent) {
 894                parent = insert_parent;
 895                p = insert_p;
 896                goto do_insert;
 897        }
 898
 899        p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
 900do_insert:
 901        dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
 902        if (!dc)
 903                return NULL;
 904
 905        return dc;
 906}
 907
 908static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
 909                                                struct discard_cmd *dc)
 910{
 911        list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
 912}
 913
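    /*
     * Drop @blkaddr from a pending discard command: a command that is already
     * completed or only one block long is removed outright; otherwise it is
     * trimmed or split into two commands around @blkaddr.
     */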
 914static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
 915                                struct discard_cmd *dc, block_t blkaddr)
 916{
 917        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 918        struct discard_info di = dc->di;
 919        bool modified = false;
 920
 921        if (dc->state == D_DONE || dc->len == 1) {
 922                __remove_discard_cmd(sbi, dc);
 923                return;
 924        }
 925
 926        dcc->undiscard_blks -= di.len;
 927
 928        if (blkaddr > di.lstart) {
 929                dc->len = blkaddr - dc->lstart;
 930                dcc->undiscard_blks += dc->len;
 931                __relocate_discard_cmd(dcc, dc);
 932                modified = true;
 933        }
 934
 935        if (blkaddr < di.lstart + di.len - 1) {
 936                if (modified) {
 937                        __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
 938                                        di.start + blkaddr + 1 - di.lstart,
 939                                        di.lstart + di.len - 1 - blkaddr,
 940                                        NULL, NULL);
 941                } else {
 942                        dc->lstart++;
 943                        dc->len--;
 944                        dc->start++;
 945                        dcc->undiscard_blks += dc->len;
 946                        __relocate_discard_cmd(dcc, dc);
 947                }
 948        }
 949}
 950
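    /*
     * Insert the discard range [@lstart, @lstart + @len) into the rb-tree of
     * pending commands, merging it with adjacent D_PREP commands on the same
     * device whenever the ranges are back- or front-mergeable.
     */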
 951static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
 952                                struct block_device *bdev, block_t lstart,
 953                                block_t start, block_t len)
 954{
 955        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 956        struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
 957        struct discard_cmd *dc;
 958        struct discard_info di = {0};
 959        struct rb_node **insert_p = NULL, *insert_parent = NULL;
 960        block_t end = lstart + len;
 961
 962        mutex_lock(&dcc->cmd_lock);
 963
 964        dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
 965                                        NULL, lstart,
 966                                        (struct rb_entry **)&prev_dc,
 967                                        (struct rb_entry **)&next_dc,
 968                                        &insert_p, &insert_parent, true);
 969        if (dc)
 970                prev_dc = dc;
 971
 972        if (!prev_dc) {
 973                di.lstart = lstart;
 974                di.len = next_dc ? next_dc->lstart - lstart : len;
 975                di.len = min(di.len, len);
 976                di.start = start;
 977        }
 978
 979        while (1) {
 980                struct rb_node *node;
 981                bool merged = false;
 982                struct discard_cmd *tdc = NULL;
 983
 984                if (prev_dc) {
 985                        di.lstart = prev_dc->lstart + prev_dc->len;
 986                        if (di.lstart < lstart)
 987                                di.lstart = lstart;
 988                        if (di.lstart >= end)
 989                                break;
 990
 991                        if (!next_dc || next_dc->lstart > end)
 992                                di.len = end - di.lstart;
 993                        else
 994                                di.len = next_dc->lstart - di.lstart;
 995                        di.start = start + di.lstart - lstart;
 996                }
 997
 998                if (!di.len)
 999                        goto next;
1000
1001                if (prev_dc && prev_dc->state == D_PREP &&
1002                        prev_dc->bdev == bdev &&
1003                        __is_discard_back_mergeable(&di, &prev_dc->di)) {
1004                        prev_dc->di.len += di.len;
1005                        dcc->undiscard_blks += di.len;
1006                        __relocate_discard_cmd(dcc, prev_dc);
1007                        di = prev_dc->di;
1008                        tdc = prev_dc;
1009                        merged = true;
1010                }
1011
1012                if (next_dc && next_dc->state == D_PREP &&
1013                        next_dc->bdev == bdev &&
1014                        __is_discard_front_mergeable(&di, &next_dc->di)) {
1015                        next_dc->di.lstart = di.lstart;
1016                        next_dc->di.len += di.len;
1017                        next_dc->di.start = di.start;
1018                        dcc->undiscard_blks += di.len;
1019                        __relocate_discard_cmd(dcc, next_dc);
1020                        if (tdc)
1021                                __remove_discard_cmd(sbi, tdc);
1022                        merged = true;
1023                }
1024
1025                if (!merged) {
1026                        __insert_discard_tree(sbi, bdev, di.lstart, di.start,
1027                                                        di.len, NULL, NULL);
1028                }
1029 next:
1030                prev_dc = next_dc;
1031                if (!prev_dc)
1032                        break;
1033
1034                node = rb_next(&prev_dc->rb_node);
1035                next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1036        }
1037
1038        mutex_unlock(&dcc->cmd_lock);
1039}
1040
1041static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1042                struct block_device *bdev, block_t blkstart, block_t blklen)
1043{
1044        block_t lblkstart = blkstart;
1045
1046        trace_f2fs_queue_discard(bdev, blkstart, blklen);
1047
1048        if (sbi->s_ndevs) {
1049                int devi = f2fs_target_device_index(sbi, blkstart);
1050
1051                blkstart -= FDEV(devi).start_blk;
1052        }
1053        __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1054        return 0;
1055}
1056
1057static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
1058{
1059        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1060        struct list_head *pend_list;
1061        struct discard_cmd *dc, *tmp;
1062        struct blk_plug plug;
1063        int iter = 0, issued = 0;
1064        int i;
1065        bool io_interrupted = false;
1066
1067        mutex_lock(&dcc->cmd_lock);
1068        f2fs_bug_on(sbi,
1069                !__check_rb_tree_consistence(sbi, &dcc->root));
1070        blk_start_plug(&plug);
1071        for (i = MAX_PLIST_NUM - 1;
1072                        i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
1073                pend_list = &dcc->pend_list[i];
1074                list_for_each_entry_safe(dc, tmp, pend_list, list) {
1075                        f2fs_bug_on(sbi, dc->state != D_PREP);
1076
1077                        /* Hurry up to finish fstrim */
1078                        if (dcc->pend_list_tag[i] & P_TRIM) {
1079                                __submit_discard_cmd(sbi, dc);
1080                                issued++;
1081
1082                                if (fatal_signal_pending(current))
1083                                        break;
1084                                continue;
1085                        }
1086
1087                        if (!issue_cond) {
1088                                __submit_discard_cmd(sbi, dc);
1089                                issued++;
1090                                continue;
1091                        }
1092
1093                        if (is_idle(sbi)) {
1094                                __submit_discard_cmd(sbi, dc);
1095                                issued++;
1096                        } else {
1097                                io_interrupted = true;
1098                        }
1099
1100                        if (++iter >= DISCARD_ISSUE_RATE)
1101                                goto out;
1102                }
1103                if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM)
1104                        dcc->pend_list_tag[i] &= (~P_TRIM);
1105        }
1106out:
1107        blk_finish_plug(&plug);
1108        mutex_unlock(&dcc->cmd_lock);
1109
1110        if (!issued && io_interrupted)
1111                issued = -1;
1112
1113        return issued;
1114}
1115
1116static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
1117{
1118        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1119        struct list_head *pend_list;
1120        struct discard_cmd *dc, *tmp;
1121        int i;
1122
1123        mutex_lock(&dcc->cmd_lock);
1124        for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1125                pend_list = &dcc->pend_list[i];
1126                list_for_each_entry_safe(dc, tmp, pend_list, list) {
1127                        f2fs_bug_on(sbi, dc->state != D_PREP);
1128                        __remove_discard_cmd(sbi, dc);
1129                }
1130        }
1131        mutex_unlock(&dcc->cmd_lock);
1132}
1133
1134static void __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1135                                                        struct discard_cmd *dc)
1136{
1137        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1138
1139        wait_for_completion_io(&dc->wait);
1140        mutex_lock(&dcc->cmd_lock);
1141        f2fs_bug_on(sbi, dc->state != D_DONE);
1142        dc->ref--;
1143        if (!dc->ref)
1144                __remove_discard_cmd(sbi, dc);
1145        mutex_unlock(&dcc->cmd_lock);
1146}
1147
1148static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
1149{
1150        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1151        struct list_head *wait_list = &(dcc->wait_list);
1152        struct discard_cmd *dc, *tmp;
1153        bool need_wait;
1154
1155next:
1156        need_wait = false;
1157
1158        mutex_lock(&dcc->cmd_lock);
1159        list_for_each_entry_safe(dc, tmp, wait_list, list) {
1160                if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
1161                        wait_for_completion_io(&dc->wait);
1162                        __remove_discard_cmd(sbi, dc);
1163                } else {
1164                        dc->ref++;
1165                        need_wait = true;
1166                        break;
1167                }
1168        }
1169        mutex_unlock(&dcc->cmd_lock);
1170
1171        if (need_wait) {
1172                __wait_one_discard_bio(sbi, dc);
1173                goto next;
1174        }
1175}
1176
1177/* This should be called under the global mutex, &sit_i->sentry_lock */
1178void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1179{
1180        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1181        struct discard_cmd *dc;
1182        bool need_wait = false;
1183
1184        mutex_lock(&dcc->cmd_lock);
1185        dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
1186        if (dc) {
1187                if (dc->state == D_PREP) {
1188                        __punch_discard_cmd(sbi, dc, blkaddr);
1189                } else {
1190                        dc->ref++;
1191                        need_wait = true;
1192                }
1193        }
1194        mutex_unlock(&dcc->cmd_lock);
1195
1196        if (need_wait)
1197                __wait_one_discard_bio(sbi, dc);
1198}
1199
1200void stop_discard_thread(struct f2fs_sb_info *sbi)
1201{
1202        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1203
1204        if (dcc && dcc->f2fs_issue_discard) {
1205                struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1206
1207                dcc->f2fs_issue_discard = NULL;
1208                kthread_stop(discard_thread);
1209        }
1210}
1211
1212/* Called from f2fs_put_super() and f2fs_trim_fs() */
1213void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
1214{
1215        __issue_discard_cmd(sbi, false);
1216        __drop_discard_cmd(sbi);
1217        __wait_discard_cmd(sbi, !umount);
1218}
1219
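    /*
     * Tag every pending list with P_TRIM so that __issue_discard_cmd() treats
     * the queued commands as fstrim requests and issues them eagerly.
     */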
1220static void mark_discard_range_all(struct f2fs_sb_info *sbi)
1221{
1222        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1223        int i;
1224
1225        mutex_lock(&dcc->cmd_lock);
1226        for (i = 0; i < MAX_PLIST_NUM; i++)
1227                dcc->pend_list_tag[i] |= P_TRIM;
1228        mutex_unlock(&dcc->cmd_lock);
1229}
1230
1231static int issue_discard_thread(void *data)
1232{
1233        struct f2fs_sb_info *sbi = data;
1234        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1235        wait_queue_head_t *q = &dcc->discard_wait_queue;
1236        unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
1237        int issued;
1238
1239        set_freezable();
1240
1241        do {
1242                wait_event_interruptible_timeout(*q,
1243                                kthread_should_stop() || freezing(current) ||
1244                                dcc->discard_wake,
1245                                msecs_to_jiffies(wait_ms));
1246                if (try_to_freeze())
1247                        continue;
1248                if (kthread_should_stop())
1249                        return 0;
1250
1251                if (dcc->discard_wake) {
1252                        dcc->discard_wake = 0;
1253                        if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
1254                                mark_discard_range_all(sbi);
1255                }
1256
1257                sb_start_intwrite(sbi->sb);
1258
1259                issued = __issue_discard_cmd(sbi, true);
1260                if (issued) {
1261                        __wait_discard_cmd(sbi, true);
1262                        wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
1263                } else {
1264                        wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
1265                }
1266
1267                sb_end_intwrite(sbi->sb);
1268
1269        } while (!kthread_should_stop());
1270        return 0;
1271}
1272
1273#ifdef CONFIG_BLK_DEV_ZONED
1274static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1275                struct block_device *bdev, block_t blkstart, block_t blklen)
1276{
1277        sector_t sector, nr_sects;
1278        block_t lblkstart = blkstart;
1279        int devi = 0;
1280
1281        if (sbi->s_ndevs) {
1282                devi = f2fs_target_device_index(sbi, blkstart);
1283                blkstart -= FDEV(devi).start_blk;
1284        }
1285
1286        /*
1287         * We need to know the type of the zone: for conventional zones,
1288         * use regular discard if the drive supports it. For sequential
1289         * zones, reset the zone write pointer.
1290         */
1291        switch (get_blkz_type(sbi, bdev, blkstart)) {
1292
1293        case BLK_ZONE_TYPE_CONVENTIONAL:
1294                if (!blk_queue_discard(bdev_get_queue(bdev)))
1295                        return 0;
1296                return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1297        case BLK_ZONE_TYPE_SEQWRITE_REQ:
1298        case BLK_ZONE_TYPE_SEQWRITE_PREF:
1299                sector = SECTOR_FROM_BLOCK(blkstart);
1300                nr_sects = SECTOR_FROM_BLOCK(blklen);
1301
1302                if (sector & (bdev_zone_sectors(bdev) - 1) ||
1303                                nr_sects != bdev_zone_sectors(bdev)) {
1304                        f2fs_msg(sbi->sb, KERN_INFO,
1305                                "(%d) %s: Unaligned discard attempted (block %x + %x)",
1306                                devi, sbi->s_ndevs ? FDEV(devi).path: "",
1307                                blkstart, blklen);
1308                        return -EIO;
1309                }
1310                trace_f2fs_issue_reset_zone(bdev, blkstart);
1311                return blkdev_reset_zones(bdev, sector,
1312                                          nr_sects, GFP_NOFS);
1313        default:
1314                /* Unknown zone type: broken device? */
1315                return -EIO;
1316        }
1317}
1318#endif
1319
1320static int __issue_discard_async(struct f2fs_sb_info *sbi,
1321                struct block_device *bdev, block_t blkstart, block_t blklen)
1322{
1323#ifdef CONFIG_BLK_DEV_ZONED
1324        if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
1325                                bdev_zoned_model(bdev) != BLK_ZONED_NONE)
1326                return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1327#endif
1328        return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1329}
1330
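    /*
     * Split [@blkstart, @blkstart + @blklen) at device boundaries, issue an
     * asynchronous discard for each piece, and mark the blocks in each
     * segment's discard_map.
     */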
1331static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1332                                block_t blkstart, block_t blklen)
1333{
1334        sector_t start = blkstart, len = 0;
1335        struct block_device *bdev;
1336        struct seg_entry *se;
1337        unsigned int offset;
1338        block_t i;
1339        int err = 0;
1340
1341        bdev = f2fs_target_device(sbi, blkstart, NULL);
1342
1343        for (i = blkstart; i < blkstart + blklen; i++, len++) {
1344                if (i != start) {
1345                        struct block_device *bdev2 =
1346                                f2fs_target_device(sbi, i, NULL);
1347
1348                        if (bdev2 != bdev) {
1349                                err = __issue_discard_async(sbi, bdev,
1350                                                start, len);
1351                                if (err)
1352                                        return err;
1353                                bdev = bdev2;
1354                                start = i;
1355                                len = 0;
1356                        }
1357                }
1358
1359                se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1360                offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1361
1362                if (!f2fs_test_and_set_bit(offset, se->discard_map))
1363                        sbi->discard_blks--;
1364        }
1365
1366        if (len)
1367                err = __issue_discard_async(sbi, bdev, start, len);
1368        return err;
1369}
1370
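    /*
     * Scan the segment's validity bitmaps for block ranges that can be
     * discarded and collect them into a discard_entry on dcc's entry_list.
     * With @check_only, only report whether such a candidate exists.
     */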
1371static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1372                                                        bool check_only)
1373{
1374        int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1375        int max_blocks = sbi->blocks_per_seg;
1376        struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1377        unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1378        unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1379        unsigned long *discard_map = (unsigned long *)se->discard_map;
1380        unsigned long *dmap = SIT_I(sbi)->tmp_map;
1381        unsigned int start = 0, end = -1;
1382        bool force = (cpc->reason & CP_DISCARD);
1383        struct discard_entry *de = NULL;
1384        struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1385        int i;
1386
1387        if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
1388                return false;
1389
1390        if (!force) {
1391                if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
1392                        SM_I(sbi)->dcc_info->nr_discards >=
1393                                SM_I(sbi)->dcc_info->max_discards)
1394                        return false;
1395        }
1396
1397        /* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1398        for (i = 0; i < entries; i++)
1399                dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1400                                (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1401
1402        while (force || SM_I(sbi)->dcc_info->nr_discards <=
1403                                SM_I(sbi)->dcc_info->max_discards) {
1404                start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1405                if (start >= max_blocks)
1406                        break;
1407
1408                end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1409                if (force && start && end != max_blocks
1410                                        && (end - start) < cpc->trim_minlen)
1411                        continue;
1412
1413                if (check_only)
1414                        return true;
1415
1416                if (!de) {
1417                        de = f2fs_kmem_cache_alloc(discard_entry_slab,
1418                                                                GFP_F2FS_ZERO);
1419                        de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1420                        list_add_tail(&de->list, head);
1421                }
1422
1423                for (i = start; i < end; i++)
1424                        __set_bit_le(i, (void *)de->discard_map);
1425
1426                SM_I(sbi)->dcc_info->nr_discards += end - start;
1427        }
1428        return false;
1429}
1430
1431void release_discard_addrs(struct f2fs_sb_info *sbi)
1432{
1433        struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
1434        struct discard_entry *entry, *this;
1435
1436        /* drop caches */
1437        list_for_each_entry_safe(entry, this, head, list) {
1438                list_del(&entry->list);
1439                kmem_cache_free(discard_entry_slab, entry);
1440        }
1441}
1442
1443/*
1444 * clear_prefree_segments() should be called after the checkpoint is done.
1445 */
1446static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1447{
1448        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1449        unsigned int segno;
1450
1451        mutex_lock(&dirty_i->seglist_lock);
1452        for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
1453                __set_test_and_free(sbi, segno);
1454        mutex_unlock(&dirty_i->seglist_lock);
1455}
1456
1457void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1458{
1459        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1460        struct list_head *head = &dcc->entry_list;
1461        struct discard_entry *entry, *this;
1462        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1463        unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
1464        unsigned int start = 0, end = -1;
1465        unsigned int secno, start_segno;
1466        bool force = (cpc->reason & CP_DISCARD);
1467
1468        mutex_lock(&dirty_i->seglist_lock);
1469
1470        while (1) {
1471                int i;
1472                start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
1473                if (start >= MAIN_SEGS(sbi))
1474                        break;
1475                end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
1476                                                                start + 1);
1477
1478                for (i = start; i < end; i++)
1479                        clear_bit(i, prefree_map);
1480
1481                dirty_i->nr_dirty[PRE] -= end - start;
1482
1483                if (!test_opt(sbi, DISCARD))
1484                        continue;
1485
1486                if (force && start >= cpc->trim_start &&
1487                                        (end - 1) <= cpc->trim_end)
1488                                continue;
1489
1490                if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
1491                        f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
1492                                (end - start) << sbi->log_blocks_per_seg);
1493                        continue;
1494                }
1495next:
1496                secno = GET_SEC_FROM_SEG(sbi, start);
1497                start_segno = GET_SEG_FROM_SEC(sbi, secno);
1498                if (!IS_CURSEC(sbi, secno) &&
1499                        !get_valid_blocks(sbi, start, true))
1500                        f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1501                                sbi->segs_per_sec << sbi->log_blocks_per_seg);
1502
1503                start = start_segno + sbi->segs_per_sec;
1504                if (start < end)
1505                        goto next;
1506                else
1507                        end = start - 1;
1508        }
1509        mutex_unlock(&dirty_i->seglist_lock);
1510
1511        /* send small discards */
1512        list_for_each_entry_safe(entry, this, head, list) {
1513                unsigned int cur_pos = 0, next_pos, len, total_len = 0;
1514                bool is_valid = test_bit_le(0, entry->discard_map);
1515
1516find_next:
1517                if (is_valid) {
1518                        next_pos = find_next_zero_bit_le(entry->discard_map,
1519                                        sbi->blocks_per_seg, cur_pos);
1520                        len = next_pos - cur_pos;
1521
1522                        if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
1523                            (force && len < cpc->trim_minlen))
1524                                goto skip;
1525
1526                        f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
1527                                                                        len);
1528                        cpc->trimmed += len;
1529                        total_len += len;
1530                } else {
1531                        next_pos = find_next_bit_le(entry->discard_map,
1532                                        sbi->blocks_per_seg, cur_pos);
1533                }
1534skip:
1535                cur_pos = next_pos;
1536                is_valid = !is_valid;
1537
1538                if (cur_pos < sbi->blocks_per_seg)
1539                        goto find_next;
1540
1541                list_del(&entry->list);
1542                dcc->nr_discards -= total_len;
1543                kmem_cache_free(discard_entry_slab, entry);
1544        }
1545
1546        wake_up_discard_thread(sbi, false);
1547}
1548
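    /*
     * Allocate and initialize the discard command control structure (if it
     * does not exist yet) and start the per-device "f2fs_discard-%u:%u"
     * issue thread.
     */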
1549static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
1550{
1551        dev_t dev = sbi->sb->s_bdev->bd_dev;
1552        struct discard_cmd_control *dcc;
1553        int err = 0, i;
1554
1555        if (SM_I(sbi)->dcc_info) {
1556                dcc = SM_I(sbi)->dcc_info;
1557                goto init_thread;
1558        }
1559
1560        dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1561        if (!dcc)
1562                return -ENOMEM;
1563
1564        dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
1565        INIT_LIST_HEAD(&dcc->entry_list);
1566        for (i = 0; i < MAX_PLIST_NUM; i++) {
1567                INIT_LIST_HEAD(&dcc->pend_list[i]);
1568                if (i >= dcc->discard_granularity - 1)
1569                        dcc->pend_list_tag[i] |= P_ACTIVE;
1570        }
1571        INIT_LIST_HEAD(&dcc->wait_list);
1572        mutex_init(&dcc->cmd_lock);
1573        atomic_set(&dcc->issued_discard, 0);
1574        atomic_set(&dcc->issing_discard, 0);
1575        atomic_set(&dcc->discard_cmd_cnt, 0);
1576        dcc->nr_discards = 0;
1577        dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
1578        dcc->undiscard_blks = 0;
1579        dcc->root = RB_ROOT;
1580
1581        init_waitqueue_head(&dcc->discard_wait_queue);
1582        SM_I(sbi)->dcc_info = dcc;
1583init_thread:
1584        dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1585                                "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
1586        if (IS_ERR(dcc->f2fs_issue_discard)) {
1587                err = PTR_ERR(dcc->f2fs_issue_discard);
1588                kfree(dcc);
1589                SM_I(sbi)->dcc_info = NULL;
1590                return err;
1591        }
1592
1593        return err;
1594}
1595
1596static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
1597{
1598        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1599
1600        if (!dcc)
1601                return;
1602
1603        stop_discard_thread(sbi);
1604
1605        kfree(dcc);
1606        SM_I(sbi)->dcc_info = NULL;
1607}
1608
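    /* Mark the SIT entry of @segno dirty; return true if it was already dirty. */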
1609static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
1610{
1611        struct sit_info *sit_i = SIT_I(sbi);
1612
1613        if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
1614                sit_i->dirty_sentries++;
1615                return false;
1616        }
1617
1618        return true;
1619}
1620
1621static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
1622                                        unsigned int segno, int modified)
1623{
1624        struct seg_entry *se = get_seg_entry(sbi, segno);
1625        se->type = type;
1626        if (modified)
1627                __mark_sit_entry_dirty(sbi, segno);
1628}
1629
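    /*
     * Apply a block count delta (+1 on allocation, -1 on invalidation) to the
     * segment containing @blkaddr, keeping the current, checkpoint and discard
     * bitmaps consistent and marking the SIT entry dirty.
     */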
1630static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1631{
1632        struct seg_entry *se;
1633        unsigned int segno, offset;
1634        long int new_vblocks;
1635        bool exist;
1636#ifdef CONFIG_F2FS_CHECK_FS
1637        bool mir_exist;
1638#endif
1639
1640        segno = GET_SEGNO(sbi, blkaddr);
1641
1642        se = get_seg_entry(sbi, segno);
1643        new_vblocks = se->valid_blocks + del;
1644        offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1645
1646        f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
1647                                (new_vblocks > sbi->blocks_per_seg)));
1648
1649        se->valid_blocks = new_vblocks;
1650        se->mtime = get_mtime(sbi);
1651        SIT_I(sbi)->max_mtime = se->mtime;
1652
1653        /* Update valid block bitmap */
1654        if (del > 0) {
1655                exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
1656#ifdef CONFIG_F2FS_CHECK_FS
1657                mir_exist = f2fs_test_and_set_bit(offset,
1658                                                se->cur_valid_map_mir);
1659                if (unlikely(exist != mir_exist)) {
1660                        f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1661                                "when setting bitmap, blk:%u, old bit:%d",
1662                                blkaddr, exist);
1663                        f2fs_bug_on(sbi, 1);
1664                }
1665#endif
1666                if (unlikely(exist)) {
1667                        f2fs_msg(sbi->sb, KERN_ERR,
1668                                "Bitmap was wrongly set, blk:%u", blkaddr);
1669                        f2fs_bug_on(sbi, 1);
1670                        se->valid_blocks--;
1671                        del = 0;
1672                }
1673
1674                if (f2fs_discard_en(sbi) &&
1675                        !f2fs_test_and_set_bit(offset, se->discard_map))
1676                        sbi->discard_blks--;
1677
1678                /* don't overwrite by SSR to keep node chain */
1679                if (se->type == CURSEG_WARM_NODE) {
1680                        if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1681                                se->ckpt_valid_blocks++;
1682                }
1683        } else {
1684                exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
1685#ifdef CONFIG_F2FS_CHECK_FS
1686                mir_exist = f2fs_test_and_clear_bit(offset,
1687                                                se->cur_valid_map_mir);
1688                if (unlikely(exist != mir_exist)) {
1689                        f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1690                                "when clearing bitmap, blk:%u, old bit:%d",
1691                                blkaddr, exist);
1692                        f2fs_bug_on(sbi, 1);
1693                }
1694#endif
1695                if (unlikely(!exist)) {
1696                        f2fs_msg(sbi->sb, KERN_ERR,
1697                                "Bitmap was wrongly cleared, blk:%u", blkaddr);
1698                        f2fs_bug_on(sbi, 1);
1699                        se->valid_blocks++;
1700                        del = 0;
1701                }
1702
1703                if (f2fs_discard_en(sbi) &&
1704                        f2fs_test_and_clear_bit(offset, se->discard_map))
1705                        sbi->discard_blks++;
1706        }
1707        if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1708                se->ckpt_valid_blocks += del;
1709
1710        __mark_sit_entry_dirty(sbi, segno);
1711
1712        /* update total number of valid blocks to be written in ckpt area */
1713        SIT_I(sbi)->written_valid_blocks += del;
1714
1715        if (sbi->segs_per_sec > 1)
1716                get_sec_entry(sbi, segno)->valid_blocks += del;
1717}
1718
1719void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
1720{
1721        update_sit_entry(sbi, new, 1);
1722        if (GET_SEGNO(sbi, old) != NULL_SEGNO)
1723                update_sit_entry(sbi, old, -1);
1724
1725        locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
1726        locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
1727}
1728
1729void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
1730{
1731        unsigned int segno = GET_SEGNO(sbi, addr);
1732        struct sit_info *sit_i = SIT_I(sbi);
1733
1734        f2fs_bug_on(sbi, addr == NULL_ADDR);
1735        if (addr == NEW_ADDR)
1736                return;
1737
1738        /* add it into sit main buffer */
1739        mutex_lock(&sit_i->sentry_lock);
1740
1741        update_sit_entry(sbi, addr, -1);
1742
1743        /* add it into dirty seglist */
1744        locate_dirty_segment(sbi, segno);
1745
1746        mutex_unlock(&sit_i->sentry_lock);
1747}
1748
1749bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
1750{
1751        struct sit_info *sit_i = SIT_I(sbi);
1752        unsigned int segno, offset;
1753        struct seg_entry *se;
1754        bool is_cp = false;
1755
1756        if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1757                return true;
1758
1759        mutex_lock(&sit_i->sentry_lock);
1760
1761        segno = GET_SEGNO(sbi, blkaddr);
1762        se = get_seg_entry(sbi, segno);
1763        offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1764
1765        if (f2fs_test_bit(offset, se->ckpt_valid_map))
1766                is_cp = true;
1767
1768        mutex_unlock(&sit_i->sentry_lock);
1769
1770        return is_cp;
1771}
1772
1773/*
1774 * This function must be called with curseg_mutex held.
1775 */
1776static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
1777                                        struct f2fs_summary *sum)
1778{
1779        struct curseg_info *curseg = CURSEG_I(sbi, type);
1780        void *addr = curseg->sum_blk;
1781        addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
1782        memcpy(addr, sum, sizeof(struct f2fs_summary));
1783}
1784
1785/*
1786 * Calculate the number of current summary pages for writing
1787 */
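    /*
     * In the compacted format the first page also carries the NAT and SIT
     * journals, and every page leaves room for a footer, so the summaries of
     * the three data logs fit in one to three pages.
     */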
1788int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
1789{
1790        int valid_sum_count = 0;
1791        int i, sum_in_page;
1792
1793        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1794                if (sbi->ckpt->alloc_type[i] == SSR)
1795                        valid_sum_count += sbi->blocks_per_seg;
1796                else {
1797                        if (for_ra)
1798                                valid_sum_count += le16_to_cpu(
1799                                        F2FS_CKPT(sbi)->cur_data_blkoff[i]);
1800                        else
1801                                valid_sum_count += curseg_blkoff(sbi, i);
1802                }
1803        }
1804
1805        sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
1806                        SUM_FOOTER_SIZE) / SUMMARY_SIZE;
1807        if (valid_sum_count <= sum_in_page)
1808                return 1;
1809        else if ((valid_sum_count - sum_in_page) <=
1810                (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
1811                return 2;
1812        return 3;
1813}
1814
1815/*
1816 * Caller should put this summary page
1817 */
1818struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
1819{
1820        return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
1821}
1822
1823void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
1824{
1825        struct page *page = grab_meta_page(sbi, blk_addr);
1826        void *dst = page_address(page);
1827
1828        if (src)
1829                memcpy(dst, src, PAGE_SIZE);
1830        else
1831                memset(dst, 0, PAGE_SIZE);
1832        set_page_dirty(page);
1833        f2fs_put_page(page, 1);
1834}
1835
1836static void write_sum_page(struct f2fs_sb_info *sbi,
1837                        struct f2fs_summary_block *sum_blk, block_t blk_addr)
1838{
1839        update_meta_page(sbi, (void *)sum_blk, blk_addr);
1840}
1841
1842static void write_current_sum_page(struct f2fs_sb_info *sbi,
1843                                                int type, block_t blk_addr)
1844{
1845        struct curseg_info *curseg = CURSEG_I(sbi, type);
1846        struct page *page = grab_meta_page(sbi, blk_addr);
1847        struct f2fs_summary_block *src = curseg->sum_blk;
1848        struct f2fs_summary_block *dst;
1849
1850        dst = (struct f2fs_summary_block *)page_address(page);
1851
1852        mutex_lock(&curseg->curseg_mutex);
1853
1854        down_read(&curseg->journal_rwsem);
1855        memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
1856        up_read(&curseg->journal_rwsem);
1857
1858        memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
1859        memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
1860
1861        mutex_unlock(&curseg->curseg_mutex);
1862
1863        set_page_dirty(page);
1864        f2fs_put_page(page, 1);
1865}
1866
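    /*
     * Return whether the segment right after curseg is free and still lies in
     * the same section.
     */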
1867static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
1868{
1869        struct curseg_info *curseg = CURSEG_I(sbi, type);
1870        unsigned int segno = curseg->segno + 1;
1871        struct free_segmap_info *free_i = FREE_I(sbi);
1872
1873        if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
1874                return !test_bit(segno, free_i->free_segmap);
1875        return 0;
1876}
1877
1878/*
1879 * Find a new segment in the free segment bitmap, honoring the requested
1880 * allocation direction.  This function must succeed; otherwise it is a BUG.
1881 */
1882static void get_new_segment(struct f2fs_sb_info *sbi,
1883                        unsigned int *newseg, bool new_sec, int dir)
1884{
1885        struct free_segmap_info *free_i = FREE_I(sbi);
1886        unsigned int segno, secno, zoneno;
1887        unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
1888        unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
1889        unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
1890        unsigned int left_start = hint;
1891        bool init = true;
1892        int go_left = 0;
1893        int i;
1894
1895        spin_lock(&free_i->segmap_lock);
1896
1897        if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
1898                segno = find_next_zero_bit(free_i->free_segmap,
1899                        GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
1900                if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
1901                        goto got_it;
1902        }
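            /*
             * Look for a whole free section: scan free_secmap to the right of
             * the hint, wrapping around for ALLOC_RIGHT, or walking left from
             * the hint for ALLOC_LEFT when nothing is found to the right.
             */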
1903find_other_zone:
1904        secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
1905        if (secno >= MAIN_SECS(sbi)) {
1906                if (dir == ALLOC_RIGHT) {
1907                        secno = find_next_zero_bit(free_i->free_secmap,
1908                                                        MAIN_SECS(sbi), 0);
1909                        f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
1910                } else {
1911                        go_left = 1;
1912                        left_start = hint - 1;
1913                }
1914        }
1915        if (go_left == 0)
1916                goto skip_left;
1917
1918        while (test_bit(left_start, free_i->free_secmap)) {
1919                if (left_start > 0) {
1920                        left_start--;
1921                        continue;
1922                }
1923                left_start = find_next_zero_bit(free_i->free_secmap,
1924                                                        MAIN_SECS(sbi), 0);
1925                f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
1926                break;
1927        }
1928        secno = left_start;
1929skip_left:
1930        hint = secno;
1931        segno = GET_SEG_FROM_SEC(sbi, secno);
1932        zoneno = GET_ZONE_FROM_SEC(sbi, secno);
1933
1934        /* give up on finding another zone */
1935        if (!init)
1936                goto got_it;
1937        if (sbi->secs_per_zone == 1)
1938                goto got_it;
1939        if (zoneno == old_zoneno)
1940                goto got_it;
1941        if (dir == ALLOC_LEFT) {
1942                if (!go_left && zoneno + 1 >= total_zones)
1943                        goto got_it;
1944                if (go_left && zoneno == 0)
1945                        goto got_it;
1946        }
1947        for (i = 0; i < NR_CURSEG_TYPE; i++)
1948                if (CURSEG_I(sbi, i)->zone == zoneno)
1949                        break;
1950
1951        if (i < NR_CURSEG_TYPE) {
1952                /* zone is in use, try another */
1953                if (go_left)
1954                        hint = zoneno * sbi->secs_per_zone - 1;
1955                else if (zoneno + 1 >= total_zones)
1956                        hint = 0;
1957                else
1958                        hint = (zoneno + 1) * sbi->secs_per_zone;
1959                init = false;
1960                goto find_other_zone;
1961        }
1962got_it:
1963        /* mark the segment as in use in the free segmap */
1964        f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
1965        __set_inuse(sbi, segno);
1966        *newseg = segno;
1967        spin_unlock(&free_i->segmap_lock);
1968}
1969
1970static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
1971{
1972        struct curseg_info *curseg = CURSEG_I(sbi, type);
1973        struct summary_footer *sum_footer;
1974
1975        curseg->segno = curseg->next_segno;
1976        curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
1977        curseg->next_blkoff = 0;
1978        curseg->next_segno = NULL_SEGNO;
1979
1980        sum_footer = &(curseg->sum_blk->footer);
1981        memset(sum_footer, 0, sizeof(struct summary_footer));
1982        if (IS_DATASEG(type))
1983                SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1984        if (IS_NODESEG(type))
1985                SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1986        __set_sit_entry_type(sbi, type, curseg->segno, modified);
1987}
1988
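    /*
     * Choose the segment number that is passed to get_new_segment() as the
     * allocation hint for the given log type.
     */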
1989static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
1990{
1991        /* if segs_per_sec is larger than 1, we need to keep the original policy. */
1992        if (sbi->segs_per_sec != 1)
1993                return CURSEG_I(sbi, type)->segno;
1994
1995        if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
1996                return 0;
1997
1998        if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
1999                return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2000        return CURSEG_I(sbi, type)->segno;
2001}
2002
2003/*
2004 * Allocate a current working segment.
2005 * This function always allocates a free segment in LFS manner.
2006 */
2007static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2008{
2009        struct curseg_info *curseg = CURSEG_I(sbi, type);
2010        unsigned int segno = curseg->segno;
2011        int dir = ALLOC_LEFT;
2012
2013        write_sum_page(sbi, curseg->sum_blk,
2014                                GET_SUM_BLOCK(sbi, segno));
2015        if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
2016                dir = ALLOC_RIGHT;
2017
2018        if (test_opt(sbi, NOHEAP))
2019                dir = ALLOC_RIGHT;
2020
2021        segno = __get_next_segno(sbi, type);
2022        get_new_segment(sbi, &segno, new_sec, dir);
2023        curseg->next_segno = segno;
2024        reset_curseg(sbi, type, 1);
2025        curseg->alloc_type = LFS;
2026}
2027
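    /*
     * Find the first block offset at or after @start that is free in both the
     * current and the checkpointed valid-block bitmaps of @seg's segment.
     */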
2028static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2029                        struct curseg_info *seg, block_t start)
2030{
2031        struct seg_entry *se = get_seg_entry(sbi, seg->segno);
2032        int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2033        unsigned long *target_map = SIT_I(sbi)->tmp_map;
2034        unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2035        unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2036        int i, pos;
2037
2038        for (i = 0; i < entries; i++)
2039                target_map[i] = ckpt_map[i] | cur_map[i];
2040
2041        pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2042
2043        seg->next_blkoff = pos;
2044}
2045
2046/*
2047 * If a segment is written in LFS manner, the next block offset is simply
2048 * obtained by increasing the current block offset.  However, in SSR manner
2049 * the next block offset is obtained by calling __next_free_blkoff().
2050 */
2051static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2052                                struct curseg_info *seg)
2053{
2054        if (seg->alloc_type == SSR)
2055                __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2056        else
2057                seg->next_blkoff++;
2058}
2059
2060/*
2061 * This function always allocates a used segment (from the dirty seglist) in
2062 * SSR manner, so it restores the existing summary and valid-block information
2063 * of that segment.
2064static void change_curseg(struct f2fs_sb_info *sbi, int type)
2065{
2066        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2067        struct curseg_info *curseg = CURSEG_I(sbi, type);
2068        unsigned int new_segno = curseg->next_segno;
2069        struct f2fs_summary_block *sum_node;
2070        struct page *sum_page;
2071
2072        write_sum_page(sbi, curseg->sum_blk,
2073                                GET_SUM_BLOCK(sbi, curseg->segno));
2074        __set_test_and_inuse(sbi, new_segno);
2075
2076        mutex_lock(&dirty_i->seglist_lock);
2077        __remove_dirty_segment(sbi, new_segno, PRE);
2078        __remove_dirty_segment(sbi, new_segno, DIRTY);
2079        mutex_unlock(&dirty_i->seglist_lock);
2080
2081        reset_curseg(sbi, type, 1);
2082        curseg->alloc_type = SSR;
2083        __next_free_blkoff(sbi, curseg, 0);
2084
2085        sum_page = get_sum_page(sbi, new_segno);
2086        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2087        memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2088        f2fs_put_page(sum_page, 1);
2089}
2090
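    /*
     * Pick an SSR target segment: first try a victim for @type itself, then
     * fall back to the other data or node logs, starting from the coldest log
     * for warm/cold types and from the hottest log for hot types.
     */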
2091static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2092{
2093        struct curseg_info *curseg = CURSEG_I(sbi, type);
2094        const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2095        unsigned segno = NULL_SEGNO;
2096        int i, cnt;
2097        bool reversed = false;
2098
2099        /* need_SSR() has already forced us to do this */
2100        if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2101                curseg->next_segno = segno;
2102                return 1;
2103        }
2104
2105        /* For node segments, let's do SSR more intensively */
2106        if (IS_NODESEG(type)) {
2107                if (type >= CURSEG_WARM_NODE) {
2108                        reversed = true;
2109                        i = CURSEG_COLD_NODE;
2110                } else {
2111                        i = CURSEG_HOT_NODE;
2112                }
2113                cnt = NR_CURSEG_NODE_TYPE;
2114        } else {
2115                if (type >= CURSEG_WARM_DATA) {
2116                        reversed = true;
2117                        i = CURSEG_COLD_DATA;
2118                } else {
2119                        i = CURSEG_HOT_DATA;
2120                }
2121                cnt = NR_CURSEG_DATA_TYPE;
2122        }
2123
2124        for (; cnt-- > 0; reversed ? i-- : i++) {
2125                if (i == type)
2126                        continue;
2127                if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2128                        curseg->next_segno = segno;
2129                        return 1;
2130                }
2131        }
2132        return 0;
2133}
2134
2135/*
2136 * Flush out the current segment and replace it with a new one.
2137 * This function must succeed; otherwise it is a BUG.
2138 */
2139static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2140                                                int type, bool force)
2141{
2142        struct curseg_info *curseg = CURSEG_I(sbi, type);
2143
2144        if (force)
2145                new_curseg(sbi, type, true);
2146        else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2147                                        type == CURSEG_WARM_NODE)
2148                new_curseg(sbi, type, false);
2149        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
2150                new_curseg(sbi, type, false);
2151        else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
2152                change_curseg(sbi, type);
2153        else
2154                new_curseg(sbi, type, false);
2155
2156        stat_inc_seg_type(sbi, curseg);
2157}
2158
2159void allocate_new_segments(struct f2fs_sb_info *sbi)
2160{
2161        struct curseg_info *curseg;
2162        unsigned int old_segno;
2163        int i;
2164
2165        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2166                curseg = CURSEG_I(sbi, i);
2167                old_segno = curseg->segno;
2168                SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2169                locate_dirty_segment(sbi, old_segno);
2170        }
2171}
2172
2173static const struct segment_allocation default_salloc_ops = {
2174        .allocate_segment = allocate_segment_by_default,
2175};
2176
2177bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2178{
2179        __u64 trim_start = cpc->trim_start;
2180        bool has_candidate = false;
2181
2182        mutex_lock(&SIT_I(sbi)->sentry_lock);
2183        for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2184                if (add_discard_addrs(sbi, cpc, true)) {
2185                        has_candidate = true;
2186                        break;
2187                }
2188        }
2189        mutex_unlock(&SIT_I(sbi)->sentry_lock);
2190
2191        cpc->trim_start = trim_start;
2192        return has_candidate;
2193}
2194
2195int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2196{
2197        __u64 start = F2FS_BYTES_TO_BLK(range->start);
2198        __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
2199        unsigned int start_segno, end_segno;
2200        struct cp_control cpc;
2201        int err = 0;
2202
2203        if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
2204                return -EINVAL;
2205
2206        cpc.trimmed = 0;
2207        if (end <= MAIN_BLKADDR(sbi))
2208                goto out;
2209
2210        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2211                f2fs_msg(sbi->sb, KERN_WARNING,
2212                        "Found FS corruption, run fsck to fix.");
2213                goto out;
2214        }
2215
2216        /* start/end segment number in main_area */
2217        start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2218        end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2219                                                GET_SEGNO(sbi, end);
2220        cpc.reason = CP_DISCARD;
2221        cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
2222
2223        /* do checkpoint to issue discard commands safely */
2224        for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
2225                cpc.trim_start = start_segno;
2226
2227                if (sbi->discard_blks == 0)
2228                        break;
2229                else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
2230                        cpc.trim_end = end_segno;
2231                else
2232                        cpc.trim_end = min_t(unsigned int,
2233                                rounddown(start_segno +
2234                                BATCHED_TRIM_SEGMENTS(sbi),
2235                                sbi->segs_per_sec) - 1, end_segno);
2236
2237                mutex_lock(&sbi->gc_mutex);
2238                err = write_checkpoint(sbi, &cpc);
2239                mutex_unlock(&sbi->gc_mutex);
2240                if (err)
2241                        break;
2242
2243                schedule();
2244        }
2245        /* It's time to issue all of the queued discards */
2246        mark_discard_range_all(sbi);
2247        f2fs_wait_discard_bios(sbi, false);
2248out:
2249        range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
2250        return err;
2251}
2252
2253static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2254{
2255        struct curseg_info *curseg = CURSEG_I(sbi, type);
2256        if (curseg->next_blkoff < sbi->blocks_per_seg)
2257                return true;
2258        return false;
2259}
2260
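    /*
     * Map a write to one of the active logs.  With two logs, all data goes to
     * the hot data log and all nodes to the hot node log; with four, data is
     * split into hot (directories) and cold, and nodes into warm (cold direct
     * nodes) and cold; with six, the full hot/warm/cold separation is used.
     */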
2261static int __get_segment_type_2(struct f2fs_io_info *fio)
2262{
2263        if (fio->type == DATA)
2264                return CURSEG_HOT_DATA;
2265        else
2266                return CURSEG_HOT_NODE;
2267}
2268
2269static int __get_segment_type_4(struct f2fs_io_info *fio)
2270{
2271        if (fio->type == DATA) {
2272                struct inode *inode = fio->page->mapping->host;
2273
2274                if (S_ISDIR(inode->i_mode))
2275                        return CURSEG_HOT_DATA;
2276                else
2277                        return CURSEG_COLD_DATA;
2278        } else {
2279                if (IS_DNODE(fio->page) && is_cold_node(fio->page))
2280                        return CURSEG_WARM_NODE;
2281                else
2282                        return CURSEG_COLD_NODE;
2283        }
2284}
2285
2286static int __get_segment_type_6(struct f2fs_io_info *fio)
2287{
2288        if (fio->type == DATA) {
2289                struct inode *inode = fio->page->mapping->host;
2290
2291                if (is_cold_data(fio->page) || file_is_cold(inode))
2292                        return CURSEG_COLD_DATA;
2293                if (is_inode_flag_set(inode, FI_HOT_DATA))
2294                        return CURSEG_HOT_DATA;
2295                return CURSEG_WARM_DATA;
2296        } else {
2297                if (IS_DNODE(fio->page))
2298                        return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
2299                                                CURSEG_HOT_NODE;
2300                return CURSEG_COLD_NODE;
2301        }
2302}
2303
2304static int __get_segment_type(struct f2fs_io_info *fio)
2305{
2306        int type = 0;
2307
2308        switch (fio->sbi->active_logs) {
2309        case 2:
2310                type = __get_segment_type_2(fio);
2311                break;
2312        case 4:
2313                type = __get_segment_type_4(fio);
2314                break;
2315        case 6:
2316                type = __get_segment_type_6(fio);
2317                break;
2318        default:
2319                f2fs_bug_on(fio->sbi, true);
2320        }
2321
2322        if (IS_HOT(type))
2323                fio->temp = HOT;
2324        else if (IS_WARM(type))
2325                fio->temp = WARM;
2326        else
2327                fio->temp = COLD;
2328        return type;
2329}
2330
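    /*
     * Reserve the next free block of the curseg for @type, record @sum for it,
     * switch to a new segment if the current one becomes full, and update the
     * SIT entries of the old and new block addresses.  When @add_list is set,
     * @fio is queued on the per-temperature write list for later submission.
     */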
2331void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
2332                block_t old_blkaddr, block_t *new_blkaddr,
2333                struct f2fs_summary *sum, int type,
2334                struct f2fs_io_info *fio, bool add_list)
2335{
2336        struct sit_info *sit_i = SIT_I(sbi);
2337        struct curseg_info *curseg = CURSEG_I(sbi, type);
2338
2339        mutex_lock(&curseg->curseg_mutex);
2340        mutex_lock(&sit_i->sentry_lock);
2341
2342        *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
2343
2344        f2fs_wait_discard_bio(sbi, *new_blkaddr);
2345
2346        /*
2347         * __add_sum_entry must be called with curseg_mutex held
2348         * because this function updates a summary entry in the
2349         * current summary block.
2350         */
2351        __add_sum_entry(sbi, type, sum);
2352
2353        __refresh_next_blkoff(sbi, curseg);
2354
2355        stat_inc_block_count(sbi, curseg);
2356
2357        if (!__has_curseg_space(sbi, type))
2358                sit_i->s_ops->allocate_segment(sbi, type, false);
2359        /*
2360         * SIT information should be updated after segment allocation,
2361         * since we need to keep dirty segments precisely under SSR.
2362         */
2363        refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
2364
2365        mutex_unlock(&sit_i->sentry_lock);
2366
2367        if (page && IS_NODESEG(type)) {
2368                fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
2369
2370                f2fs_inode_chksum_set(sbi, page);
2371        }
2372
2373        if (add_list) {
2374                struct f2fs_bio_info *io;
2375
2376                INIT_LIST_HEAD(&fio->list);
2377                fio->in_list = true;
2378                io = sbi->write_io[fio->type] + fio->temp;
2379                spin_lock(&io->io_lock);
2380                list_add_tail(&fio->list, &io->io_list);
2381                spin_unlock(&io->io_lock);
2382        }
2383
2384        mutex_unlock(&curseg->curseg_mutex);
2385}
2386
2387static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
2388{
2389        int type = __get_segment_type(fio);
2390        int err;
2391
2392reallocate:
2393        allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
2394                        &fio->new_blkaddr, sum, type, fio, true);
2395
2396        /* write out the dirty page to the block device */
2397        err = f2fs_submit_page_write(fio);
2398        if (err == -EAGAIN) {
2399                fio->old_blkaddr = fio->new_blkaddr;
2400                goto reallocate;
2401        }
2402}
2403
2404void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
2405                                        enum iostat_type io_type)
2406{
2407        struct f2fs_io_info fio = {
2408                .sbi = sbi,
2409                .type = META,
2410                .op = REQ_OP_WRITE,
2411                .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
2412                .old_blkaddr = page->index,
2413                .new_blkaddr = page->index,
2414                .page = page,
2415                .encrypted_page = NULL,
2416                .in_list = false,
2417        };
2418
2419        if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
2420                fio.op_flags &= ~REQ_META;
2421
2422        set_page_writeback(page);
2423        f2fs_submit_page_write(&fio);
2424
2425        f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
2426}
2427
2428void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
2429{
2430        struct f2fs_summary sum;
2431
2432        set_summary(&sum, nid, 0, 0);
2433        do_write_page(&sum, fio);
2434
2435        f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2436}
2437
2438void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
2439{
2440        struct f2fs_sb_info *sbi = fio->sbi;
2441        struct f2fs_summary sum;
2442        struct node_info ni;
2443
2444        f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
2445        get_node_info(sbi, dn->nid, &ni);
2446        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
2447        do_write_page(&sum, fio);
2448        f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
2449
2450        f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
2451}
2452
2453int rewrite_data_page(struct f2fs_io_info *fio)
2454{
2455        int err;
2456
2457        fio->new_blkaddr = fio->old_blkaddr;
2458        stat_inc_inplace_blocks(fio->sbi);
2459
2460        err = f2fs_submit_page_bio(fio);
2461
2462        f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2463
2464        return err;
2465}
2466
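    /*
     * Rewrite the summary entry of @new_blkaddr and adjust the SIT entries of
     * both the old and the new block address.  The current segment is switched
     * to the one containing @new_blkaddr if necessary and, when @recover_curseg
     * is set, restored to its original position afterwards.
     */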
2467void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
2468                                block_t old_blkaddr, block_t new_blkaddr,
2469                                bool recover_curseg, bool recover_newaddr)
2470{
2471        struct sit_info *sit_i = SIT_I(sbi);
2472        struct curseg_info *curseg;
2473        unsigned int segno, old_cursegno;
2474        struct seg_entry *se;
2475        int type;
2476        unsigned short old_blkoff;
2477
2478        segno = GET_SEGNO(sbi, new_blkaddr);
2479        se = get_seg_entry(sbi, segno);
2480        type = se->type;
2481
2482        if (!recover_curseg) {
2483                /* for recovery flow */
2484                if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2485                        if (old_blkaddr == NULL_ADDR)
2486                                type = CURSEG_COLD_DATA;
2487                        else
2488                                type = CURSEG_WARM_DATA;
2489                }
2490        } else {
2491                if (!IS_CURSEG(sbi, segno))
2492                        type = CURSEG_WARM_DATA;
2493        }
2494
2495        curseg = CURSEG_I(sbi, type);
2496
2497        mutex_lock(&curseg->curseg_mutex);
2498        mutex_lock(&sit_i->sentry_lock);
2499
2500        old_cursegno = curseg->segno;
2501        old_blkoff = curseg->next_blkoff;
2502
2503        /* change the current segment */
2504        if (segno != curseg->segno) {
2505                curseg->next_segno = segno;
2506                change_curseg(sbi, type);
2507        }
2508
2509        curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
2510        __add_sum_entry(sbi, type, sum);
2511
2512        if (!recover_curseg || recover_newaddr)
2513                update_sit_entry(sbi, new_blkaddr, 1);
2514        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2515                update_sit_entry(sbi, old_blkaddr, -1);
2516
2517        locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2518        locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
2519
2520        locate_dirty_segment(sbi, old_cursegno);
2521
2522        if (recover_curseg) {
2523                if (old_cursegno != curseg->segno) {
2524                        curseg->next_segno = old_cursegno;
2525                        change_curseg(sbi, type);
2526                }
2527                curseg->next_blkoff = old_blkoff;
2528        }
2529
2530        mutex_unlock(&sit_i->sentry_lock);
2531        mutex_unlock(&curseg->curseg_mutex);
2532}
2533
2534void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2535                                block_t old_addr, block_t new_addr,
2536                                unsigned char version, bool recover_curseg,
2537                                bool recover_newaddr)
2538{
2539        struct f2fs_summary sum;
2540
2541        set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2542
2543        __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
2544                                        recover_curseg, recover_newaddr);
2545
2546        f2fs_update_data_blkaddr(dn, new_addr);
2547}
2548
2549void f2fs_wait_on_page_writeback(struct page *page,
2550                                enum page_type type, bool ordered)
2551{
2552        if (PageWriteback(page)) {
2553                struct f2fs_sb_info *sbi = F2FS_P_SB(page);
2554
2555                f2fs_submit_merged_write_cond(sbi, page->mapping->host,
2556                                                0, page->index, type);
2557                if (ordered)
2558                        wait_on_page_writeback(page);
2559                else
2560                        wait_for_stable_page(page);
2561        }
2562}
2563
2564void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
2565{
2566        struct page *cpage;
2567
2568        if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
2569                return;
2570
2571        cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
2572        if (cpage) {
2573                f2fs_wait_on_page_writeback(cpage, DATA, true);
2574                f2fs_put_page(cpage, 1);
2575        }
2576}
2577
2578static int read_compacted_summaries(struct f2fs_sb_info *sbi)
2579{
2580        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2581        struct curseg_info *seg_i;
2582        unsigned char *kaddr;
2583        struct page *page;
2584        block_t start;
2585        int i, j, offset;
2586
2587        start = start_sum_block(sbi);
2588
2589        page = get_meta_page(sbi, start++);
2590        kaddr = (unsigned char *)page_address(page);
2591
2592        /* Step 1: restore nat cache */
2593        seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2594        memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
2595
2596        /* Step 2: restore sit cache */
2597        seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2598        memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
2599        offset = 2 * SUM_JOURNAL_SIZE;
2600
2601        /* Step 3: restore summary entries */
2602        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2603                unsigned short blk_off;
2604                unsigned int segno;
2605
2606                seg_i = CURSEG_I(sbi, i);
2607                segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2608                blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2609                seg_i->next_segno = segno;
2610                reset_curseg(sbi, i, 0);
2611                seg_i->alloc_type = ckpt->alloc_type[i];
2612                seg_i->next_blkoff = blk_off;
2613
2614                if (seg_i->alloc_type == SSR)
2615                        blk_off = sbi->blocks_per_seg;
2616
2617                for (j = 0; j < blk_off; j++) {
2618                        struct f2fs_summary *s;
2619                        s = (struct f2fs_summary *)(kaddr + offset);
2620                        seg_i->sum_blk->entries[j] = *s;
2621                        offset += SUMMARY_SIZE;
2622                        if (offset + SUMMARY_SIZE <= PAGE_SIZE -
2623                                                SUM_FOOTER_SIZE)
2624                                continue;
2625
2626                        f2fs_put_page(page, 1);
2627                        page = NULL;
2628
2629                        page = get_meta_page(sbi, start++);
2630                        kaddr = (unsigned char *)page_address(page);
2631                        offset = 0;
2632                }
2633        }
2634        f2fs_put_page(page, 1);
2635        return 0;
2636}
2637
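    /*
     * Read the on-disk summary block of one curseg log and restore the
     * curseg's in-memory state from the checkpoint: journal, summary entries,
     * footer, segment number and next block offset.
     */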
2638static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
2639{
2640        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2641        struct f2fs_summary_block *sum;
2642        struct curseg_info *curseg;
2643        struct page *new;
2644        unsigned short blk_off;
2645        unsigned int segno = 0;
2646        block_t blk_addr = 0;
2647
2648        /* get segment number and block addr */
2649        if (IS_DATASEG(type)) {
2650                segno = le32_to_cpu(ckpt->cur_data_segno[type]);
2651                blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
2652                                                        CURSEG_HOT_DATA]);
2653                if (__exist_node_summaries(sbi))
2654                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
2655                else
2656                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
2657        } else {
2658                segno = le32_to_cpu(ckpt->cur_node_segno[type -
2659                                                        CURSEG_HOT_NODE]);
2660                blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
2661                                                        CURSEG_HOT_NODE]);
2662                if (__exist_node_summaries(sbi))
2663                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
2664                                                        type - CURSEG_HOT_NODE);
2665                else
2666                        blk_addr = GET_SUM_BLOCK(sbi, segno);
2667        }
2668
2669        new = get_meta_page(sbi, blk_addr);
2670        sum = (struct f2fs_summary_block *)page_address(new);
2671
2672        if (IS_NODESEG(type)) {
2673                if (__exist_node_summaries(sbi)) {
2674                        struct f2fs_summary *ns = &sum->entries[0];
2675                        int i;
2676                        for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
2677                                ns->version = 0;
2678                                ns->ofs_in_node = 0;
2679                        }
2680                } else {
2681                        int err;
2682
2683                        err = restore_node_summary(sbi, segno, sum);
2684                        if (err) {
2685                                f2fs_put_page(new, 1);
2686                                return err;
2687                        }
2688                }
2689        }
2690
2691        /* set uncompleted segment to curseg */
2692        curseg = CURSEG_I(sbi, type);
2693        mutex_lock(&curseg->curseg_mutex);
2694
2695        /* update journal info */
2696        down_write(&curseg->journal_rwsem);
2697        memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
2698        up_write(&curseg->journal_rwsem);
2699
2700        memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
2701        memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
2702        curseg->next_segno = segno;
2703        reset_curseg(sbi, type, 0);
2704        curseg->alloc_type = ckpt->alloc_type[type];
2705        curseg->next_blkoff = blk_off;
2706        mutex_unlock(&curseg->curseg_mutex);
2707        f2fs_put_page(new, 1);
2708        return 0;
2709}
2710
2711static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
2712{
2713        struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
2714        struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
2715        int type = CURSEG_HOT_DATA;
2716        int err;
2717
2718        if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
2719                int npages = npages_for_summary_flush(sbi, true);
2720
2721                if (npages >= 2)
2722                        ra_meta_pages(sbi, start_sum_block(sbi), npages,
2723                                                        META_CP, true);
2724
2725                /* restore for compacted data summary */
2726                if (read_compacted_summaries(sbi))
2727                        return -EINVAL;
2728                type = CURSEG_HOT_NODE;
2729        }
2730
2731        if (__exist_node_summaries(sbi))
2732                ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
2733                                        NR_CURSEG_TYPE - type, META_CP, true);
2734
2735        for (; type <= CURSEG_COLD_NODE; type++) {
2736                err = read_normal_summaries(sbi, type);
2737                if (err)
2738                        return err;
2739        }
2740
2741        /* sanity check for summary blocks */
2742        if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
2743                        sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
2744                return -EINVAL;
2745
2746        return 0;
2747}
2748
2749static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
2750{
2751        struct page *page;
2752        unsigned char *kaddr;
2753        struct f2fs_summary *summary;
2754        struct curseg_info *seg_i;
2755        int written_size = 0;
2756        int i, j;
2757
2758        page = grab_meta_page(sbi, blkaddr++);
2759        kaddr = (unsigned char *)page_address(page);
2760
2761        /* Step 1: write nat cache */
2762        seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2763        memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
2764        written_size += SUM_JOURNAL_SIZE;
2765
2766        /* Step 2: write sit cache */
2767        seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2768        memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
2769        written_size += SUM_JOURNAL_SIZE;
2770
2771        /* Step 3: write summary entries */
2772        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2773                unsigned short blkoff;
2774                seg_i = CURSEG_I(sbi, i);
2775                if (sbi->ckpt->alloc_type[i] == SSR)
2776                        blkoff = sbi->blocks_per_seg;
2777                else
2778                        blkoff = curseg_blkoff(sbi, i);
2779
2780                for (j = 0; j < blkoff; j++) {
2781                        if (!page) {
2782                                page = grab_meta_page(sbi, blkaddr++);
2783                                kaddr = (unsigned char *)page_address(page);
2784                                written_size = 0;
2785                        }
2786                        summary = (struct f2fs_summary *)(kaddr + written_size);
2787                        *summary = seg_i->sum_blk->entries[j];
2788                        written_size += SUMMARY_SIZE;
2789
2790                        if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
2791                                                        SUM_FOOTER_SIZE)
2792                                continue;
2793
2794                        set_page_dirty(page);
2795                        f2fs_put_page(page, 1);
2796                        page = NULL;
2797                }
2798        }
2799        if (page) {
2800                set_page_dirty(page);
2801                f2fs_put_page(page, 1);
2802        }
2803}
2804
2805static void write_normal_summaries(struct f2fs_sb_info *sbi,
2806                                        block_t blkaddr, int type)
2807{
2808        int i, end;
2809        if (IS_DATASEG(type))
2810                end = type + NR_CURSEG_DATA_TYPE;
2811        else
2812                end = type + NR_CURSEG_NODE_TYPE;
2813
2814        for (i = type; i < end; i++)
2815                write_current_sum_page(sbi, i, blkaddr + (i - type));
2816}
2817
2818void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2819{
2820        if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
2821                write_compacted_summaries(sbi, start_blk);
2822        else
2823                write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
2824}
2825
2826void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2827{
2828        write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
2829}
2830
2831int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
2832                                        unsigned int val, int alloc)
2833{
2834        int i;
2835
2836        if (type == NAT_JOURNAL) {
2837                for (i = 0; i < nats_in_cursum(journal); i++) {
2838                        if (le32_to_cpu(nid_in_journal(journal, i)) == val)
2839                                return i;
2840                }
2841                if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
2842                        return update_nats_in_cursum(journal, 1);
2843        } else if (type == SIT_JOURNAL) {
2844                for (i = 0; i < sits_in_cursum(journal); i++)
2845                        if (le32_to_cpu(segno_in_journal(journal, i)) == val)
2846                                return i;
2847                if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
2848                        return update_sits_in_cursum(journal, 1);
2849        }
2850        return -1;
2851}
2852
2853static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
2854                                        unsigned int segno)
2855{
2856        return get_meta_page(sbi, current_sit_addr(sbi, segno));
2857}
2858
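    /*
     * SIT blocks are written to alternating locations: copy the currently
     * valid SIT block into the page that will hold it after this checkpoint
     * and flip its bit in the SIT bitmap.
     */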
2859static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
2860                                        unsigned int start)
2861{
2862        struct sit_info *sit_i = SIT_I(sbi);
2863        struct page *src_page, *dst_page;
2864        pgoff_t src_off, dst_off;
2865        void *src_addr, *dst_addr;
2866
2867        src_off = current_sit_addr(sbi, start);
2868        dst_off = next_sit_addr(sbi, src_off);
2869
2870        /* get current sit block page without lock */
2871        src_page = get_meta_page(sbi, src_off);
2872        dst_page = grab_meta_page(sbi, dst_off);
2873        f2fs_bug_on(sbi, PageDirty(src_page));
2874
2875        src_addr = page_address(src_page);
2876        dst_addr = page_address(dst_page);
2877        memcpy(dst_addr, src_addr, PAGE_SIZE);
2878
2879        set_page_dirty(dst_page);
2880        f2fs_put_page(src_page, 1);
2881
2882        set_to_next_sit(sit_i, start);
2883
2884        return dst_page;
2885}
2886
2887static struct sit_entry_set *grab_sit_entry_set(void)
2888{
2889        struct sit_entry_set *ses =
2890                        f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
2891
2892        ses->entry_cnt = 0;
2893        INIT_LIST_HEAD(&ses->set_list);
2894        return ses;
2895}
2896
2897static void release_sit_entry_set(struct sit_entry_set *ses)
2898{
2899        list_del(&ses->set_list);
2900        kmem_cache_free(sit_entry_set_slab, ses);
2901}
2902
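    /*
     * Keep the set list sorted by ascending entry count so that, at flush
     * time, the smallest sets are considered for the SIT journal first.
     */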
2903static void adjust_sit_entry_set(struct sit_entry_set *ses,
2904                                                struct list_head *head)
2905{
2906        struct sit_entry_set *next = ses;
2907
2908        if (list_is_last(&ses->set_list, head))
2909                return;
2910
2911        list_for_each_entry_continue(next, head, set_list)
2912                if (ses->entry_cnt <= next->entry_cnt)
2913                        break;
2914
2915        list_move_tail(&ses->set_list, &next->set_list);
2916}
2917
2918static void add_sit_entry(unsigned int segno, struct list_head *head)
2919{
2920        struct sit_entry_set *ses;
2921        unsigned int start_segno = START_SEGNO(segno);
2922
2923        list_for_each_entry(ses, head, set_list) {
2924                if (ses->start_segno == start_segno) {
2925                        ses->entry_cnt++;
2926                        adjust_sit_entry_set(ses, head);
2927                        return;
2928                }
2929        }
2930
2931        ses = grab_sit_entry_set();
2932
2933        ses->start_segno = start_segno;
2934        ses->entry_cnt++;
2935        list_add(&ses->set_list, head);
2936}
2937
2938static void add_sits_in_set(struct f2fs_sb_info *sbi)
2939{
2940        struct f2fs_sm_info *sm_info = SM_I(sbi);
2941        struct list_head *set_list = &sm_info->sit_entry_set;
2942        unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
2943        unsigned int segno;
2944
2945        for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
2946                add_sit_entry(segno, set_list);
2947}
2948
2949static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
2950{
2951        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2952        struct f2fs_journal *journal = curseg->journal;
2953        int i;
2954
2955        down_write(&curseg->journal_rwsem);
2956        for (i = 0; i < sits_in_cursum(journal); i++) {
2957                unsigned int segno;
2958                bool dirtied;
2959
2960                segno = le32_to_cpu(segno_in_journal(journal, i));
2961                dirtied = __mark_sit_entry_dirty(sbi, segno);
2962
2963                if (!dirtied)
2964                        add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
2965        }
2966        update_sits_in_cursum(journal, -i);
2967        up_write(&curseg->journal_rwsem);
2968}
2969
2970/*
2971 * CP calls this function, which flushes SIT entries including sit_journal,
2972 * and moves prefree segs to free segs.
2973 */
2974void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2975{
2976        struct sit_info *sit_i = SIT_I(sbi);
2977        unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
2978        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2979        struct f2fs_journal *journal = curseg->journal;
2980        struct sit_entry_set *ses, *tmp;
2981        struct list_head *head = &SM_I(sbi)->sit_entry_set;
2982        bool to_journal = true;
2983        struct seg_entry *se;
2984
2985        mutex_lock(&sit_i->sentry_lock);
2986
2987        if (!sit_i->dirty_sentries)
2988                goto out;
2989
2990        /*
2991         * add and account the sit entries marked in the dirty bitmap
2992         * in temporary sit entry sets
2993         */
2994        add_sits_in_set(sbi);
2995
2996        /*
2997         * if there is not enough space in the journal to store the dirty
2998         * sit entries, remove all entries from the journal and account
2999         * them in the sit entry sets instead.
3000         */
3001        if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
3002                remove_sits_in_journal(sbi);
3003
3004        /*
3005         * there are two steps to flush sit entries:
3006         * #1, flush sit entries to journal in current cold data summary block.
3007         * #2, flush sit entries to sit page.
3008         */
3009        list_for_each_entry_safe(ses, tmp, head, set_list) {
3010                struct page *page = NULL;
3011                struct f2fs_sit_block *raw_sit = NULL;
3012                unsigned int start_segno = ses->start_segno;
3013                unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
3014                                                (unsigned long)MAIN_SEGS(sbi));
3015                unsigned int segno = start_segno;
3016
3017                if (to_journal &&
3018                        !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
3019                        to_journal = false;
3020
3021                if (to_journal) {
3022                        down_write(&curseg->journal_rwsem);
3023                } else {
3024                        page = get_next_sit_page(sbi, start_segno);
3025                        raw_sit = page_address(page);
3026                }
3027
3028                /* flush dirty sit entries in region of current sit set */
3029                for_each_set_bit_from(segno, bitmap, end) {
3030                        int offset, sit_offset;
3031
3032                        se = get_seg_entry(sbi, segno);
3033
3034                        /* add discard candidates */
3035                        if (!(cpc->reason & CP_DISCARD)) {
3036                                cpc->trim_start = segno;
3037                                add_discard_addrs(sbi, cpc, false);
3038                        }
3039
3040                        if (to_journal) {
3041                                offset = lookup_journal_in_cursum(journal,
3042                                                        SIT_JOURNAL, segno, 1);
3043                                f2fs_bug_on(sbi, offset < 0);
3044                                segno_in_journal(journal, offset) =
3045                                                        cpu_to_le32(segno);
3046                                seg_info_to_raw_sit(se,
3047                                        &sit_in_journal(journal, offset));
3048                        } else {
3049                                sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3050                                seg_info_to_raw_sit(se,
3051                                                &raw_sit->entries[sit_offset]);
3052                        }
3053
3054                        __clear_bit(segno, bitmap);
3055                        sit_i->dirty_sentries--;
3056                        ses->entry_cnt--;
3057                }
3058
3059                if (to_journal)
3060                        up_write(&curseg->journal_rwsem);
3061                else
3062                        f2fs_put_page(page, 1);
3063
3064                f2fs_bug_on(sbi, ses->entry_cnt);
3065                release_sit_entry_set(ses);
3066        }
3067
3068        f2fs_bug_on(sbi, !list_empty(head));
3069        f2fs_bug_on(sbi, sit_i->dirty_sentries);
3070out:
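        /*
         * For an explicit trim (CP_DISCARD), still collect discard candidates
         * over the whole requested range; trim_start is saved and restored
         * because add_discard_addrs() is driven one segment at a time.
         */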
3071        if (cpc->reason & CP_DISCARD) {
3072                __u64 trim_start = cpc->trim_start;
3073
3074                for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
3075                        add_discard_addrs(sbi, cpc, false);
3076
3077                cpc->trim_start = trim_start;
3078        }
3079        mutex_unlock(&sit_i->sentry_lock);
3080
3081        set_prefree_as_free_segments(sbi);
3082}
3083
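/*
 * Build the in-memory SIT: the per-segment seg_entry array with its
 * current/checkpoint/discard validity bitmaps, the per-section entries
 * when a section spans more than one segment, the dirty sentries bitmap,
 * and a private copy of the on-disk SIT bitmap from the checkpoint pack.
 */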
3084static int build_sit_info(struct f2fs_sb_info *sbi)
3085{
3086        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3087        struct sit_info *sit_i;
3088        unsigned int sit_segs, start;
3089        char *src_bitmap;
3090        unsigned int bitmap_size;
3091
3092        /* allocate memory for SIT information */
3093        sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
3094        if (!sit_i)
3095                return -ENOMEM;
3096
3097        SM_I(sbi)->sit_info = sit_i;
3098
3099        sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
3100                                        sizeof(struct seg_entry), GFP_KERNEL);
3101        if (!sit_i->sentries)
3102                return -ENOMEM;
3103
3104        bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3105        sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
3106        if (!sit_i->dirty_sentries_bitmap)
3107                return -ENOMEM;
3108
3109        for (start = 0; start < MAIN_SEGS(sbi); start++) {
3110                sit_i->sentries[start].cur_valid_map
3111                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3112                sit_i->sentries[start].ckpt_valid_map
3113                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3114                if (!sit_i->sentries[start].cur_valid_map ||
3115                                !sit_i->sentries[start].ckpt_valid_map)
3116                        return -ENOMEM;
3117
3118#ifdef CONFIG_F2FS_CHECK_FS
3119                sit_i->sentries[start].cur_valid_map_mir
3120                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3121                if (!sit_i->sentries[start].cur_valid_map_mir)
3122                        return -ENOMEM;
3123#endif
3124
3125                if (f2fs_discard_en(sbi)) {
3126                        sit_i->sentries[start].discard_map
3127                                = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3128                        if (!sit_i->sentries[start].discard_map)
3129                                return -ENOMEM;
3130                }
3131        }
3132
3133        sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3134        if (!sit_i->tmp_map)
3135                return -ENOMEM;
3136
3137        if (sbi->segs_per_sec > 1) {
3138                sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
3139                                        sizeof(struct sec_entry), GFP_KERNEL);
3140                if (!sit_i->sec_entries)
3141                        return -ENOMEM;
3142        }
3143
3144        /* get information related to the SIT */
3145        sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
3146
3147        /* set up the SIT bitmap from the checkpoint pack */
3148        bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
3149        src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
3150
3151        sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3152        if (!sit_i->sit_bitmap)
3153                return -ENOMEM;
3154
3155#ifdef CONFIG_F2FS_CHECK_FS
3156        sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3157        if (!sit_i->sit_bitmap_mir)
3158                return -ENOMEM;
3159#endif
3160
3161        /* init SIT information */
3162        sit_i->s_ops = &default_salloc_ops;
3163
3164        sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
3165        sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
3166        sit_i->written_valid_blocks = 0;
3167        sit_i->bitmap_size = bitmap_size;
3168        sit_i->dirty_sentries = 0;
3169        sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
3170        sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
3171        sit_i->mounted_time = ktime_get_real_seconds();
3172        mutex_init(&sit_i->sentry_lock);
3173        return 0;
3174}
3175
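/*
 * Allocate the free segment and free section bitmaps.  Both start out
 * fully set (everything "in use"); init_free_segmap() later clears the
 * bits of segments found to hold no valid blocks.
 */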
3176static int build_free_segmap(struct f2fs_sb_info *sbi)
3177{
3178        struct free_segmap_info *free_i;
3179        unsigned int bitmap_size, sec_bitmap_size;
3180
3181        /* allocate memory for free segmap information */
3182        free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
3183        if (!free_i)
3184                return -ENOMEM;
3185
3186        SM_I(sbi)->free_info = free_i;
3187
3188        bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3189        free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
3190        if (!free_i->free_segmap)
3191                return -ENOMEM;
3192
3193        sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3194        free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
3195        if (!free_i->free_secmap)
3196                return -ENOMEM;
3197
3198        /* temporarily mark every segment and section as in use (not free) */
3199        memset(free_i->free_segmap, 0xff, bitmap_size);
3200        memset(free_i->free_secmap, 0xff, sec_bitmap_size);
3201
3202        /* init free segmap information */
3203        free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
3204        free_i->free_segments = 0;
3205        free_i->free_sections = 0;
3206        spin_lock_init(&free_i->segmap_lock);
3207        return 0;
3208}
3209
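/*
 * Allocate one curseg_info per active log (NR_CURSEG_TYPE), each with
 * its own summary block and journal buffer, then restore the current
 * segment summaries via restore_curseg_summaries().
 */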
3210static int build_curseg(struct f2fs_sb_info *sbi)
3211{
3212        struct curseg_info *array;
3213        int i;
3214
3215        array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
3216        if (!array)
3217                return -ENOMEM;
3218
3219        SM_I(sbi)->curseg_array = array;
3220
3221        for (i = 0; i < NR_CURSEG_TYPE; i++) {
3222                mutex_init(&array[i].curseg_mutex);
3223                array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
3224                if (!array[i].sum_blk)
3225                        return -ENOMEM;
3226                init_rwsem(&array[i].journal_rwsem);
3227                array[i].journal = kzalloc(sizeof(struct f2fs_journal),
3228                                                        GFP_KERNEL);
3229                if (!array[i].journal)
3230                        return -ENOMEM;
3231                array[i].segno = NULL_SEGNO;
3232                array[i].next_blkoff = 0;
3233        }
3234        return restore_curseg_summaries(sbi);
3235}
3236
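/*
 * Populate the seg_entries from the on-disk SIT blocks (read ahead in
 * BIO_MAX_PAGES chunks), then apply the newer entries still cached in
 * the cold data journal, rebuilding the discard maps and the
 * per-section valid block counts along the way.
 */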
3237static void build_sit_entries(struct f2fs_sb_info *sbi)
3238{
3239        struct sit_info *sit_i = SIT_I(sbi);
3240        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3241        struct f2fs_journal *journal = curseg->journal;
3242        struct seg_entry *se;
3243        struct f2fs_sit_entry sit;
3244        int sit_blk_cnt = SIT_BLK_CNT(sbi);
3245        unsigned int i, start, end;
3246        unsigned int readed, start_blk = 0;
3247
3248        do {
3249                readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
3250                                                        META_SIT, true);
3251
3252                start = start_blk * sit_i->sents_per_block;
3253                end = (start_blk + readed) * sit_i->sents_per_block;
3254
3255                for (; start < end && start < MAIN_SEGS(sbi); start++) {
3256                        struct f2fs_sit_block *sit_blk;
3257                        struct page *page;
3258
3259                        se = &sit_i->sentries[start];
3260                        page = get_current_sit_page(sbi, start);
3261                        sit_blk = (struct f2fs_sit_block *)page_address(page);
3262                        sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3263                        f2fs_put_page(page, 1);
3264
3265                        check_block_count(sbi, start, &sit);
3266                        seg_info_from_raw_sit(se, &sit);
3267
3268                        /* build discard map only one time */
3269                        if (f2fs_discard_en(sbi)) {
3270                                if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3271                                        memset(se->discard_map, 0xff,
3272                                                SIT_VBLOCK_MAP_SIZE);
3273                                } else {
3274                                        memcpy(se->discard_map,
3275                                                se->cur_valid_map,
3276                                                SIT_VBLOCK_MAP_SIZE);
3277                                        sbi->discard_blks +=
3278                                                sbi->blocks_per_seg -
3279                                                se->valid_blocks;
3280                                }
3281                        }
3282
3283                        if (sbi->segs_per_sec > 1)
3284                                get_sec_entry(sbi, start)->valid_blocks +=
3285                                                        se->valid_blocks;
3286                }
3287                start_blk += readed;
3288        } while (start_blk < sit_blk_cnt);
3289
3290        down_read(&curseg->journal_rwsem);
3291        for (i = 0; i < sits_in_cursum(journal); i++) {
3292                unsigned int old_valid_blocks;
3293
3294                start = le32_to_cpu(segno_in_journal(journal, i));
3295                se = &sit_i->sentries[start];
3296                sit = sit_in_journal(journal, i);
3297
3298                old_valid_blocks = se->valid_blocks;
3299
3300                check_block_count(sbi, start, &sit);
3301                seg_info_from_raw_sit(se, &sit);
3302
3303                if (f2fs_discard_en(sbi)) {
3304                        if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3305                                memset(se->discard_map, 0xff,
3306                                                        SIT_VBLOCK_MAP_SIZE);
3307                        } else {
3308                                memcpy(se->discard_map, se->cur_valid_map,
3309                                                        SIT_VBLOCK_MAP_SIZE);
3310                                sbi->discard_blks += old_valid_blocks -
3311                                                        se->valid_blocks;
3312                        }
3313                }
3314
3315                if (sbi->segs_per_sec > 1)
3316                        get_sec_entry(sbi, start)->valid_blocks +=
3317                                se->valid_blocks - old_valid_blocks;
3318        }
3319        up_read(&curseg->journal_rwsem);
3320}
3321
3322static void init_free_segmap(struct f2fs_sb_info *sbi)
3323{
3324        unsigned int start;
3325        int type;
3326
3327        for (start = 0; start < MAIN_SEGS(sbi); start++) {
3328                struct seg_entry *sentry = get_seg_entry(sbi, start);
3329                if (!sentry->valid_blocks)
3330                        __set_free(sbi, start);
3331                else
3332                        SIT_I(sbi)->written_valid_blocks +=
3333                                                sentry->valid_blocks;
3334        }
3335
3336        /* mark the current (active) segments as in use */
3337        for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
3338                struct curseg_info *curseg_t = CURSEG_I(sbi, type);
3339                __set_test_and_inuse(sbi, curseg_t->segno);
3340        }
3341}
3342
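/*
 * A segment is dirty when it is in use but only partially valid, i.e.
 * 0 < valid_blocks < blocks_per_seg.  Scan the in-use segments and put
 * every such segment on the DIRTY list.
 */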
3343static void init_dirty_segmap(struct f2fs_sb_info *sbi)
3344{
3345        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3346        struct free_segmap_info *free_i = FREE_I(sbi);
3347        unsigned int segno = 0, offset = 0;
3348        unsigned short valid_blocks;
3349
3350        while (1) {
3351                /* find dirty segment based on free segmap */
3352                segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
3353                if (segno >= MAIN_SEGS(sbi))
3354                        break;
3355                offset = segno + 1;
3356                valid_blocks = get_valid_blocks(sbi, segno, false);
3357                if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
3358                        continue;
3359                if (valid_blocks > sbi->blocks_per_seg) {
3360                        f2fs_bug_on(sbi, 1);
3361                        continue;
3362                }
3363                mutex_lock(&dirty_i->seglist_lock);
3364                __locate_dirty_segment(sbi, segno, DIRTY);
3365                mutex_unlock(&dirty_i->seglist_lock);
3366        }
3367}
3368
3369static int init_victim_secmap(struct f2fs_sb_info *sbi)
3370{
3371        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3372        unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3373
3374        dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
3375        if (!dirty_i->victim_secmap)
3376                return -ENOMEM;
3377        return 0;
3378}
3379
3380static int build_dirty_segmap(struct f2fs_sb_info *sbi)
3381{
3382        struct dirty_seglist_info *dirty_i;
3383        unsigned int bitmap_size, i;
3384
3385        /* allocate memory for dirty segments list information */
3386        dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
3387        if (!dirty_i)
3388                return -ENOMEM;
3389
3390        SM_I(sbi)->dirty_info = dirty_i;
3391        mutex_init(&dirty_i->seglist_lock);
3392
3393        bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3394
3395        for (i = 0; i < NR_DIRTY_TYPE; i++) {
3396                dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
3397                if (!dirty_i->dirty_segmap[i])
3398                        return -ENOMEM;
3399        }
3400
3401        init_dirty_segmap(sbi);
3402        return init_victim_secmap(sbi);
3403}
3404
3405/*
3406 * Update min, max modified time for cost-benefit GC algorithm
3407 */
3408static void init_min_max_mtime(struct f2fs_sb_info *sbi)
3409{
3410        struct sit_info *sit_i = SIT_I(sbi);
3411        unsigned int segno;
3412
3413        mutex_lock(&sit_i->sentry_lock);
3414
3415        sit_i->min_mtime = LLONG_MAX;
3416
3417        for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
3418                unsigned int i;
3419                unsigned long long mtime = 0;
3420
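                /*
                 * min_mtime tracks the smallest per-section average mtime:
                 *   mtime = sum of seg_entry->mtime over the section / segs_per_sec
                 */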
3421                for (i = 0; i < sbi->segs_per_sec; i++)
3422                        mtime += get_seg_entry(sbi, segno + i)->mtime;
3423
3424                mtime = div_u64(mtime, sbi->segs_per_sec);
3425
3426                if (sit_i->min_mtime > mtime)
3427                        sit_i->min_mtime = mtime;
3428        }
3429        sit_i->max_mtime = get_mtime(sbi);
3430        mutex_unlock(&sit_i->sentry_lock);
3431}
3432
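/*
 * Bring up the segment manager: initialise f2fs_sm_info from the raw
 * superblock and checkpoint, start the flush and discard command
 * controls, then build the SIT, free segmap, curseg and dirty segmap
 * structures in dependency order.
 */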
3433int build_segment_manager(struct f2fs_sb_info *sbi)
3434{
3435        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3436        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3437        struct f2fs_sm_info *sm_info;
3438        int err;
3439
3440        sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
3441        if (!sm_info)
3442                return -ENOMEM;
3443
3444        /* init sm info */
3445        sbi->sm_info = sm_info;
3446        sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3447        sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3448        sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
3449        sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3450        sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3451        sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
3452        sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3453        sm_info->rec_prefree_segments = sm_info->main_segments *
3454                                        DEF_RECLAIM_PREFREE_SEGMENTS / 100;
3455        if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
3456                sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
3457
3458        if (!test_opt(sbi, LFS))
3459                sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
3460        sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
3461        sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
3462        sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
3463
3464        sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
3465
3466        INIT_LIST_HEAD(&sm_info->sit_entry_set);
3467
3468        if (!f2fs_readonly(sbi->sb)) {
3469                err = create_flush_cmd_control(sbi);
3470                if (err)
3471                        return err;
3472        }
3473
3474        err = create_discard_cmd_control(sbi);
3475        if (err)
3476                return err;
3477
3478        err = build_sit_info(sbi);
3479        if (err)
3480                return err;
3481        err = build_free_segmap(sbi);
3482        if (err)
3483                return err;
3484        err = build_curseg(sbi);
3485        if (err)
3486                return err;
3487
3488        /* reinit free segmap based on SIT */
3489        build_sit_entries(sbi);
3490
3491        init_free_segmap(sbi);
3492        err = build_dirty_segmap(sbi);
3493        if (err)
3494                return err;
3495
3496        init_min_max_mtime(sbi);
3497        return 0;
3498}
3499
3500static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
3501                enum dirty_type dirty_type)
3502{
3503        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3504
3505        mutex_lock(&dirty_i->seglist_lock);
3506        kvfree(dirty_i->dirty_segmap[dirty_type]);
3507        dirty_i->nr_dirty[dirty_type] = 0;
3508        mutex_unlock(&dirty_i->seglist_lock);
3509}
3510
3511static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
3512{
3513        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3514        kvfree(dirty_i->victim_secmap);
3515}
3516
3517static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
3518{
3519        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3520        int i;
3521
3522        if (!dirty_i)
3523                return;
3524
3525        /* discard pre-free/dirty segments list */
3526        for (i = 0; i < NR_DIRTY_TYPE; i++)
3527                discard_dirty_segmap(sbi, i);
3528
3529        destroy_victim_secmap(sbi);
3530        SM_I(sbi)->dirty_info = NULL;
3531        kfree(dirty_i);
3532}
3533
3534static void destroy_curseg(struct f2fs_sb_info *sbi)
3535{
3536        struct curseg_info *array = SM_I(sbi)->curseg_array;
3537        int i;
3538
3539        if (!array)
3540                return;
3541        SM_I(sbi)->curseg_array = NULL;
3542        for (i = 0; i < NR_CURSEG_TYPE; i++) {
3543                kfree(array[i].sum_blk);
3544                kfree(array[i].journal);
3545        }
3546        kfree(array);
3547}
3548
3549static void destroy_free_segmap(struct f2fs_sb_info *sbi)
3550{
3551        struct free_segmap_info *free_i = SM_I(sbi)->free_info;
3552        if (!free_i)
3553                return;
3554        SM_I(sbi)->free_info = NULL;
3555        kvfree(free_i->free_segmap);
3556        kvfree(free_i->free_secmap);
3557        kfree(free_i);
3558}
3559
3560static void destroy_sit_info(struct f2fs_sb_info *sbi)
3561{
3562        struct sit_info *sit_i = SIT_I(sbi);
3563        unsigned int start;
3564
3565        if (!sit_i)
3566                return;
3567
3568        if (sit_i->sentries) {
3569                for (start = 0; start < MAIN_SEGS(sbi); start++) {
3570                        kfree(sit_i->sentries[start].cur_valid_map);
3571#ifdef CONFIG_F2FS_CHECK_FS
3572                        kfree(sit_i->sentries[start].cur_valid_map_mir);
3573#endif
3574                        kfree(sit_i->sentries[start].ckpt_valid_map);
3575                        kfree(sit_i->sentries[start].discard_map);
3576                }
3577        }
3578        kfree(sit_i->tmp_map);
3579
3580        kvfree(sit_i->sentries);
3581        kvfree(sit_i->sec_entries);
3582        kvfree(sit_i->dirty_sentries_bitmap);
3583
3584        SM_I(sbi)->sit_info = NULL;
3585        kfree(sit_i->sit_bitmap);
3586#ifdef CONFIG_F2FS_CHECK_FS
3587        kfree(sit_i->sit_bitmap_mir);
3588#endif
3589        kfree(sit_i);
3590}
3591
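/* tear down all segment manager state built by build_segment_manager() */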
3592void destroy_segment_manager(struct f2fs_sb_info *sbi)
3593{
3594        struct f2fs_sm_info *sm_info = SM_I(sbi);
3595
3596        if (!sm_info)
3597                return;
3598        destroy_flush_cmd_control(sbi, true);
3599        destroy_discard_cmd_control(sbi);
3600        destroy_dirty_segmap(sbi);
3601        destroy_curseg(sbi);
3602        destroy_free_segmap(sbi);
3603        destroy_sit_info(sbi);
3604        sbi->sm_info = NULL;
3605        kfree(sm_info);
3606}
3607
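/*
 * Slab caches used by the segment manager: discard candidates, pending
 * discard commands, temporary sit entry sets and in-memory page entries.
 * On failure, the caches created so far are destroyed before returning.
 */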
3608int __init create_segment_manager_caches(void)
3609{
3610        discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
3611                        sizeof(struct discard_entry));
3612        if (!discard_entry_slab)
3613                goto fail;
3614
3615        discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
3616                        sizeof(struct discard_cmd));
3617        if (!discard_cmd_slab)
3618                goto destroy_discard_entry;
3619
3620        sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
3621                        sizeof(struct sit_entry_set));
3622        if (!sit_entry_set_slab)
3623                goto destroy_discard_cmd;
3624
3625        inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
3626                        sizeof(struct inmem_pages));
3627        if (!inmem_entry_slab)
3628                goto destroy_sit_entry_set;
3629        return 0;
3630
3631destroy_sit_entry_set:
3632        kmem_cache_destroy(sit_entry_set_slab);
3633destroy_discard_cmd:
3634        kmem_cache_destroy(discard_cmd_slab);
3635destroy_discard_entry:
3636        kmem_cache_destroy(discard_entry_slab);
3637fail:
3638        return -ENOMEM;
3639}
3640
3641void destroy_segment_manager_caches(void)
3642{
3643        kmem_cache_destroy(sit_entry_set_slab);
3644        kmem_cache_destroy(discard_cmd_slab);
3645        kmem_cache_destroy(discard_entry_slab);
3646        kmem_cache_destroy(inmem_entry_slab);
3647}
3648