linux/fs/f2fs/gc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
        unsigned int wait_ms;
        struct f2fs_gc_control gc_control = {
                .victim_segno = NULL_SEGNO,
                .should_migrate_blocks = false,
                .err_gc_skipped = false };

        wait_ms = gc_th->min_sleep_time;

        set_freezable();
        do {
                bool sync_mode, foreground = false;

                wait_event_interruptible_timeout(*wq,
                                kthread_should_stop() || freezing(current) ||
                                waitqueue_active(fggc_wq) ||
                                gc_th->gc_wake,
                                msecs_to_jiffies(wait_ms));

                if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
                        foreground = true;

                /* give it a try one time */
                if (gc_th->gc_wake)
                        gc_th->gc_wake = 0;

                if (try_to_freeze()) {
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }

                if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                        f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
                        f2fs_stop_checkpoint(sbi, false);
                }

                if (!sb_start_write_trylock(sbi->sb)) {
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note) We have to avoid triggering GCs too frequently,
                 * because some segments may be invalidated soon afterwards
                 * by user updates or deletions. So we wait a while to let
                 * more dirty segments accumulate.
                 */
                if (sbi->gc_mode == GC_URGENT_HIGH) {
                        spin_lock(&sbi->gc_urgent_high_lock);
                        if (sbi->gc_urgent_high_limited) {
                                if (!sbi->gc_urgent_high_remaining) {
                                        sbi->gc_urgent_high_limited = false;
                                        spin_unlock(&sbi->gc_urgent_high_lock);
                                        sbi->gc_mode = GC_NORMAL;
                                        continue;
                                }
                                sbi->gc_urgent_high_remaining--;
                        }
                        spin_unlock(&sbi->gc_urgent_high_lock);
                }

                if (sbi->gc_mode == GC_URGENT_HIGH ||
                                sbi->gc_mode == GC_URGENT_MID) {
                        wait_ms = gc_th->urgent_sleep_time;
                        f2fs_down_write(&sbi->gc_lock);
                        goto do_gc;
                }

                if (foreground) {
                        f2fs_down_write(&sbi->gc_lock);
                        goto do_gc;
                } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
                        stat_other_skip_bggc_count(sbi);
                        goto next;
                }

                if (!is_idle(sbi, GC_TIME)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        f2fs_up_write(&sbi->gc_lock);
                        stat_io_skip_bggc_count(sbi);
                        goto next;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);
do_gc:
                if (!foreground)
                        stat_inc_bggc_count(sbi->stat_info);

                sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

                /* foreground GC was triggered via f2fs_balance_fs() */
                if (foreground)
                        sync_mode = false;

                gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
                gc_control.no_bg_gc = foreground;
                gc_control.nr_free_secs = foreground ? 1 : 0;

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, &gc_control))
                        wait_ms = gc_th->no_gc_sleep_time;

                if (foreground)
                        wake_up_all(&gc_th->fggc_wq);

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi, true);
next:
                sb_end_write(sbi->sb);

        } while (!kthread_should_stop());
        return 0;
}

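/*
 * The sleep interval above adapts between min_sleep_time and
 * max_sleep_time: it shrinks while there are enough invalid blocks to
 * reclaim and grows when I/O is busy or little garbage exists. These
 * thresholds are runtime-tunable; a minimal sketch of tuning them from
 * userspace, assuming the standard f2fs sysfs entries (values in
 * milliseconds, <disk> is the block device name; the numbers below are
 * illustrative, not values taken from this file):
 *
 *   echo 100   > /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
 *   echo 30000 > /sys/fs/f2fs/<disk>/gc_min_sleep_time
 *   echo 60000 > /sys/fs/f2fs/<disk>/gc_max_sleep_time
 */
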
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_wake = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        init_waitqueue_head(&sbi->gc_thread->fggc_wq);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        wake_up_all(&gc_th->fggc_wq);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
        int gc_mode;

        if (gc_type == BG_GC) {
                if (sbi->am.atgc_enabled)
                        gc_mode = GC_AT;
                else
                        gc_mode = GC_CB;
        } else {
                gc_mode = GC_GREEDY;
        }

        switch (sbi->gc_mode) {
        case GC_IDLE_CB:
                gc_mode = GC_CB;
                break;
        case GC_IDLE_GREEDY:
        case GC_URGENT_HIGH:
                gc_mode = GC_GREEDY;
                break;
        case GC_IDLE_AT:
                gc_mode = GC_AT;
                break;
        }

        return gc_mode;
}

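/*
 * The sbi->gc_mode override above is driven from userspace; a brief
 * sketch, assuming the documented f2fs sysfs knobs (exact value
 * meanings may differ across kernel versions):
 *
 *   echo 1 > /sys/fs/f2fs/<disk>/gc_idle    - idle GC prefers cost-benefit
 *   echo 2 > /sys/fs/f2fs/<disk>/gc_idle    - idle GC prefers greedy
 *   echo 3 > /sys/fs/f2fs/<disk>/gc_idle    - idle GC prefers age-threshold
 *   echo 1 > /sys/fs/f2fs/<disk>/gc_urgent  - urgent-high: greedy at
 *                                             urgent_sleep_time intervals
 */
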
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_bitmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else if (p->alloc_mode == AT_SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_bitmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi, gc_type);
                p->ofs_unit = sbi->segs_per_sec;
                if (__is_large_section(sbi)) {
                        p->dirty_bitmap = dirty_i->dirty_secmap;
                        p->max_search = count_bits(p->dirty_bitmap,
                                                0, MAIN_SECS(sbi));
                } else {
                        p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
                        p->max_search = dirty_i->nr_dirty[DIRTY];
                }
        }

        /*
         * Adjust the candidate range: all dirty segments should be
         * searchable in the foreground GC and urgent GC cases.
         */
        if (gc_type != FG_GC &&
                        (sbi->gc_mode != GC_URGENT_HIGH) &&
                        (p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
                        p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        /* let's select the beginning hot/small space first in no_heap mode */
        if (f2fs_need_rand_seg(sbi))
                p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
        else if (test_opt(sbi, NOHEAP) &&
                (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
                p->offset = 0;
        else
                p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        else if (p->alloc_mode == AT_SSR)
                return UINT_MAX;

        /* LFS */
        if (p->gc_mode == GC_GREEDY)
                return 2 * sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else if (p->gc_mode == GC_AT)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

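/*
 * The value returned above seeds p->min_cost, so any real candidate
 * must cost strictly less. A worked example for greedy LFS mode,
 * assuming 512 blocks per segment and one segment per section
 * (ofs_unit == 1): max cost = 2 * 512 * 1 = 1024, while the greedy
 * cost of a candidate is its valid-block count, at most 512 - hence
 * every dirty section can beat the initial minimum.
 */
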
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can reuse victim sections that
         * were already selected by background GC: those sections are
         * guaranteed to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return GET_SEG_FROM_SEC(sbi, secno);
        }
        return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
        unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;
        unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

        for (i = 0; i < usable_segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, true);

        mtime = div_u64(mtime, usable_segs_per_sec);
        vblocks = div_u64(vblocks, usable_segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

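/*
 * This is the classic LFS cost-benefit policy: the benefit is
 * proportional to age * (100 - u) / (100 + u), where u is the section's
 * utilization in percent and age is normalized to 0..100, and the
 * function returns UINT_MAX minus that benefit so a smaller cost is
 * better. A worked example: u = 20, age = 80 gives
 * 100 * (100 - 20) * 80 / (100 + 20) = 5333, i.e. a cost of
 * UINT_MAX - 5333; a fully utilized section (u = 100) yields a benefit
 * of 0 and is never preferred.
 */
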
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, true);
        else if (p->gc_mode == GC_CB)
                return get_cb_cost(sbi, segno);

        f2fs_bug_on(sbi, 1);
        return 0;
}

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len)
{
        unsigned int end = offset + len, sum = 0;

        while (offset < end) {
                if (test_bit(offset++, addr))
                        ++sum;
        }
        return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
                                unsigned long long mtime, unsigned int segno,
                                struct rb_node *parent, struct rb_node **p,
                                bool left_most)
{
        struct atgc_management *am = &sbi->am;
        struct victim_entry *ve;

        ve = f2fs_kmem_cache_alloc(victim_entry_slab,
                                GFP_NOFS, true, NULL);

        ve->mtime = mtime;
        ve->segno = segno;

        rb_link_node(&ve->rb_node, parent, p);
        rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

        list_add_tail(&ve->list, &am->victim_list);

        am->victim_count++;

        return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
                                unsigned long long mtime, unsigned int segno)
{
        struct atgc_management *am = &sbi->am;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        bool left_most = true;

        p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
        attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
        unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
        unsigned long long mtime = 0;
        unsigned int i;

        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
                if (p->gc_mode == GC_AT &&
                        get_valid_blocks(sbi, segno, true) == 0)
                        return;
        }

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        mtime = div_u64(mtime, sbi->segs_per_sec);

        /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (mtime < sit_i->dirty_min_mtime)
                sit_i->dirty_min_mtime = mtime;
        if (mtime > sit_i->dirty_max_mtime)
                sit_i->dirty_max_mtime = mtime;

        /* don't choose a young section as a candidate */
        if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
                return;

        insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        struct atgc_management *am = &sbi->am;
        struct rb_node *parent = NULL;
        bool left_most;

        f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

        return parent;
}

static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct atgc_management *am = &sbi->am;
        struct rb_root_cached *root = &am->root;
        struct rb_node *node;
        struct rb_entry *re;
        struct victim_entry *ve;
        unsigned long long total_time;
        unsigned long long age, u, accu;
        unsigned long long max_mtime = sit_i->dirty_max_mtime;
        unsigned long long min_mtime = sit_i->dirty_min_mtime;
        unsigned int sec_blocks = BLKS_PER_SEC(sbi);
        unsigned int vblocks;
        unsigned int dirty_threshold = max(am->max_candidate_count,
                                        am->candidate_ratio *
                                        am->victim_count / 100);
        unsigned int age_weight = am->age_weight;
        unsigned int cost;
        unsigned int iter = 0;

        if (max_mtime < min_mtime)
                return;

        max_mtime += 1;
        total_time = max_mtime - min_mtime;

        accu = div64_u64(ULLONG_MAX, total_time);
        accu = min_t(unsigned long long, div_u64(accu, 100),
                                        DEFAULT_ACCURACY_CLASS);

        node = rb_first_cached(root);
next:
        re = rb_entry_safe(node, struct rb_entry, rb_node);
        if (!re)
                return;

        ve = (struct victim_entry *)re;

        if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
                goto skip;

        /* age = 10000 * x% * 60 */
        age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
                                                                age_weight;

        vblocks = get_valid_blocks(sbi, ve->segno, true);
        f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

        /* u = 10000 * x% * 40 */
        u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
                                                        (100 - age_weight);

        f2fs_bug_on(sbi, age + u >= UINT_MAX);

        cost = UINT_MAX - (age + u);
        iter++;

        if (cost < p->min_cost ||
                        (cost == p->min_cost && age > p->oldest_age)) {
                p->min_cost = cost;
                p->oldest_age = age;
                p->min_segno = ve->segno;
        }
skip:
        if (iter < dirty_threshold) {
                node = rb_next(node);
                goto next;
        }
}

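/*
 * A worked example of the ATGC weighting above, assuming the default
 * accuracy class of 10000 and the default age_weight of 60: a section
 * whose mtime sits 30% of the way back through the dirty-mtime range
 * and which is 75% free scores
 *   age = 10000 * 30% * 60 = 180000
 *   u   = 10000 * 75% * 40 = 300000
 * so cost = UINT_MAX - 480000; older and emptier sections both lower
 * the cost, with age contributing 60% of the weight by default.
 */
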
/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct atgc_management *am = &sbi->am;
        struct rb_node *node;
        struct rb_entry *re;
        struct victim_entry *ve;
        unsigned long long age;
        unsigned long long max_mtime = sit_i->dirty_max_mtime;
        unsigned long long min_mtime = sit_i->dirty_min_mtime;
        unsigned int seg_blocks = sbi->blocks_per_seg;
        unsigned int vblocks;
        unsigned int dirty_threshold = max(am->max_candidate_count,
                                        am->candidate_ratio *
                                        am->victim_count / 100);
        unsigned int cost;
        unsigned int iter = 0;
        int stage = 0;

        if (max_mtime < min_mtime)
                return;
        max_mtime += 1;
next_stage:
        node = lookup_central_victim(sbi, p);
next_node:
        re = rb_entry_safe(node, struct rb_entry, rb_node);
        if (!re) {
                if (stage == 0)
                        goto skip_stage;
                return;
        }

        ve = (struct victim_entry *)re;

        if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
                goto skip_node;

        age = max_mtime - ve->mtime;

        vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
        f2fs_bug_on(sbi, !vblocks);

        /* rare case */
        if (vblocks == seg_blocks)
                goto skip_node;

        iter++;

        age = max_mtime - abs(p->age - age);
        cost = UINT_MAX - vblocks;

        if (cost < p->min_cost ||
                        (cost == p->min_cost && age > p->oldest_age)) {
                p->min_cost = cost;
                p->oldest_age = age;
                p->min_segno = ve->segno;
        }
skip_node:
        if (iter < dirty_threshold) {
                if (stage == 0)
                        node = rb_prev(node);
                else if (stage == 1)
                        node = rb_next(node);
                goto next_node;
        }
skip_stage:
        if (stage < 1) {
                stage++;
                iter = 0;
                goto next_stage;
        }
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
                                                struct victim_sel_policy *p)
{
        f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
                                                &sbi->am.root, true));

        if (p->gc_mode == GC_AT)
                atgc_lookup_victim(sbi, p);
        else if (p->alloc_mode == AT_SSR)
                atssr_lookup_victim(sbi, p);
        else
                f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
        struct atgc_management *am = &sbi->am;
        struct victim_entry *ve, *tmp;

        list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
                list_del(&ve->list);
                kmem_cache_free(victim_entry_slab, ve);
                am->victim_count--;
        }

        am->root = RB_ROOT_CACHED;

        f2fs_bug_on(sbi, am->victim_count);
        f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

        if (!dirty_i->enable_pin_section)
                return false;
        if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
                dirty_i->pinned_secmap_cnt++;
        return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
        return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
                                                unsigned int secno)
{
        return dirty_i->enable_pin_section &&
                f2fs_pinned_section_exists(dirty_i) &&
                test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
        unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

        if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
                memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
                DIRTY_I(sbi)->pinned_secmap_cnt = 0;
        }
        DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
                                                        unsigned int segno)
{
        if (!f2fs_is_pinned_file(inode))
                return 0;
        if (gc_type != FG_GC)
                return -EBUSY;
        if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
                f2fs_pin_file_control(inode, true);
        return -EAGAIN;
}

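/*
 * Pinned files are what the pin-section machinery above protects from
 * migration. A minimal userspace sketch of creating one, assuming the
 * F2FS_IOC_SET_PIN_FILE ioctl exposed via <linux/f2fs.h>:
 *
 *   #include <sys/ioctl.h>
 *   #include <linux/f2fs.h>
 *
 *   __u32 pin = 1;
 *   int fd = open("/mnt/f2fs/fixed.img", O_RDWR);
 *
 *   if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin))
 *           perror("pin");   // e.g. not supported or not permitted
 *
 * Blocks of such a file keep their LBAs; GC returns -EBUSY/-EAGAIN
 * above, and f2fs_pin_file_control() un-pins the file only after its
 * GC-failure limit is exceeded.
 */
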
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        unsigned int *result, int gc_type, int type,
                        char alloc_mode, unsigned long long age)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct sit_info *sm = SIT_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
        unsigned int last_segment;
        unsigned int nsearched;
        bool is_atgc;
        int ret = 0;

        mutex_lock(&dirty_i->seglist_lock);
        last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

        p.alloc_mode = alloc_mode;
        p.age = age;
        p.age_threshold = sbi->am.age_threshold;

retry:
        select_policy(sbi, gc_type, type, &p);
        p.min_segno = NULL_SEGNO;
        p.oldest_age = 0;
        p.min_cost = get_max_cost(sbi, &p);

        is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
        nsearched = 0;

        if (is_atgc)
                SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

        if (*result != NULL_SEGNO) {
                if (!get_valid_blocks(sbi, *result, false)) {
                        ret = -ENODATA;
                        goto out;
                }

                if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
                        ret = -EBUSY;
                else
                        p.min_segno = *result;
                goto out;
        }

        ret = -ENODATA;
        if (p.max_search == 0)
                goto out;

        if (__is_large_section(sbi) && p.alloc_mode == LFS) {
                if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
                        p.min_segno = sbi->next_victim_seg[BG_GC];
                        *result = p.min_segno;
                        sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
                        goto got_result;
                }
                if (gc_type == FG_GC &&
                                sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
                        p.min_segno = sbi->next_victim_seg[FG_GC];
                        *result = p.min_segno;
                        sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
                        goto got_result;
                }
        }

        last_victim = sm->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost, *dirty_bitmap;
                unsigned int unit_no, segno;

                dirty_bitmap = p.dirty_bitmap;
                unit_no = find_next_bit(dirty_bitmap,
                                last_segment / p.ofs_unit,
                                p.offset / p.ofs_unit);
                segno = unit_no * p.ofs_unit;
                if (segno >= last_segment) {
                        if (sm->last_victim[p.gc_mode]) {
                                last_segment =
                                        sm->last_victim[p.gc_mode];
                                sm->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
                /*
                 * Skip an invalid segno (i.e. one that failed the block
                 * validity check during GC) to avoid an endless GC loop
                 * in such cases.
                 */
                if (test_bit(segno, sm->invalid_segmap))
                        goto next;
#endif

                secno = GET_SEC_FROM_SEG(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        goto next;

                /* Don't touch checkpointed data */
                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
                        if (p.alloc_mode == LFS) {
                                /*
                                 * LFS is set to find a source section
                                 * during GC. The victim should have no
                                 * checkpointed data.
                                 */
                                if (get_ckpt_valid_blocks(sbi, segno, true))
                                        goto next;
                        } else {
                                /*
                                 * SSR | AT_SSR are set to find a target
                                 * segment for writes, which may be full
                                 * of checkpointed and newly written
                                 * blocks.
                                 */
                                if (!f2fs_segment_has_free_slot(sbi, segno))
                                        goto next;
                        }
                }

                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;

                if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
                        goto next;

                if (is_atgc) {
                        add_victim_entry(sbi, &p, segno);
                        goto next;
                }

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
next:
                if (nsearched >= p.max_search) {
                        if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
                                sm->last_victim[p.gc_mode] =
                                        last_victim + p.ofs_unit;
                        else
                                sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
                        sm->last_victim[p.gc_mode] %=
                                (MAIN_SECS(sbi) * sbi->segs_per_sec);
                        break;
                }
        }

        /* get victim for GC_AT/AT_SSR */
        if (is_atgc) {
                lookup_victim_by_age(sbi, &p);
                release_victim_entry(sbi);
        }

        if (is_atgc && p.min_segno == NULL_SEGNO &&
                        sm->elapsed_time < p.age_threshold) {
                p.age_threshold = 0;
                goto retry;
        }

        if (p.min_segno != NULL_SEGNO) {
got_it:
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
                if (p.alloc_mode == LFS) {
                        secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                ret = 0;
        }
out:
        if (p.min_segno != NULL_SEGNO)
                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        mutex_unlock(&dirty_i->seglist_lock);

        return ret;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
                                        GFP_NOFS, true, NULL);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;

        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(f2fs_inode_entry_slab, ie);
        }
}

static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        down_read(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        up_read(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If the address is valid, the node is migrated
 * with cold status; otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;
        bool fggc = (gc_type == FG_GC);
        int submitted = 0;
        unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        if (fggc && phase == 2)
                atomic_inc(&sbi->wb_sync_req[NODE]);

        for (off = 0; off < usable_blks_in_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;
                int err;

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return submitted;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        f2fs_ra_node_page(sbi, nid);
                        continue;
                }

                /* phase == 2 */
                node_page = f2fs_get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during f2fs_get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                if (f2fs_get_node_info(sbi, nid, &ni, false)) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                err = f2fs_move_node_page(node_page, gc_type);
                if (!err && gc_type == FG_GC)
                        submitted++;
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (++phase < 3)
                goto next_step;

        if (fggc)
                atomic_dec(&sbi->wb_sync_req[NODE]);
        return submitted;
}

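/*
 * The phase loop above is a readahead pipeline over the same summary
 * entries: phase 0 prefetches the NAT blocks, phase 1 prefetches the
 * node pages themselves, and only phase 2 re-validates each block and
 * actually migrates it. Paying three passes over the summary is cheaper
 * than issuing a synchronous metadata read per node block in one pass.
 */
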
/*
 * Calculate the start block index that the given node offset refers to.
 * Be careful: the caller must pass a node offset that refers only to a
 * direct node block. Passing an offset that points to any other type of
 * node block, such as an indirect or double indirect node block, is a
 * caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

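/*
 * A worked example of the mapping above: node offsets 1 and 2 are the
 * two direct nodes hanging off the inode, so node_ofs == 2 yields
 * bidx == 1, i.e. the file range starting at
 * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode). For node_ofs == 4,
 * the first direct node under the first indirect node (offset 3),
 * dec == (4 - 4) / (NIDS_PER_BLOCK + 1) == 0 and bidx == 2: each "dec"
 * discounts one non-direct node encountered per NIDS_PER_BLOCK + 1
 * offsets.
 */
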
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        if (f2fs_get_node_info(sbi, nid, dni, false)) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        if (sum->version != dni->version) {
                f2fs_warn(sbi, "%s: valid data with mismatched node version.",
                          __func__);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
        }

        if (f2fs_check_nid_range(sbi, dni->ino)) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
                unsigned int segno = GET_SEGNO(sbi, blkaddr);
                unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

                if (unlikely(check_valid_map(sbi, segno, offset))) {
                        if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
                                f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
                                         blkaddr, source_blkaddr, segno);
                                set_sbi_flag(sbi, SBI_NEED_FSCK);
                        }
                }
#endif
                return false;
        }
        return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, 0, 0};
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
                .retry = false,
        };
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page)
                return -ENOMEM;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ))) {
                        err = -EFSCORRUPTED;
                        goto put_page;
                }
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_page;
        f2fs_put_dnode(&dn);

        if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
                err = -ENOENT;
                goto put_page;
        }
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE))) {
                err = -EFSCORRUPTED;
                goto put_page;
        }
got_it:
        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        /*
         * don't cache encrypted data into the meta inode until the
         * previous dirty data has been written back, to avoid racing
         * between GC and flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true, true);

        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
                                        dn.data_blkaddr,
                                        FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto put_page;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_encrypted_page;
        f2fs_put_page(fio.encrypted_page, 0);
        f2fs_put_page(page, 1);

        f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
        f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

        return 0;
put_encrypted_page:
        f2fs_put_page(fio.encrypted_page, 1);
put_page:
        f2fs_put_page(page, 1);
        return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page
 * locked. This can be used to move blocks, aka LBAs, directly on disk:
 * the source block is read into a meta-inode page, a new block address
 * is allocated, the contents are copied over and written out, and the
 * dnode entry is finally updated to point at the new address.
 */
static int move_data_block(struct inode *inode, block_t bidx,
                                int gc_type, unsigned int segno, int off)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
                .retry = false,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page, *mpage;
        block_t newaddr;
        int err = 0;
        bool lfs_mode = f2fs_lfs_mode(fio.sbi);
        int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
                                (fio.sbi->gc_mode != GC_URGENT_HIGH) ?
                                CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return -ENOMEM;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
                err = -ENOENT;
                goto out;
        }

        err = f2fs_gc_pinned_control(inode, gc_type, segno);
        if (err)
                goto out;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                err = -ENOENT;
                goto put_out;
        }

        /*
         * don't cache encrypted data into the meta inode until the
         * previous dirty data has been written back, to avoid racing
         * between GC and flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true, true);

        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
        if (err)
                goto put_out;

        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        if (lfs_mode)
                f2fs_down_write(&fio.sbi->io_order_lock);

        mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
                                        fio.old_blkaddr, false);
        if (!mpage) {
                err = -ENOMEM;
                goto up_out;
        }

        fio.encrypted_page = mpage;

        /* read source block in mpage */
        if (!PageUptodate(mpage)) {
                err = f2fs_submit_page_bio(&fio);
                if (err) {
                        f2fs_put_page(mpage, 1);
                        goto up_out;
                }

                f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
                f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

                lock_page(mpage);
                if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
                                                !PageUptodate(mpage))) {
                        err = -EIO;
                        f2fs_put_page(mpage, 1);
                        goto up_out;
                }
        }

        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* allocate block address */
        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                &sum, type, NULL);

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                f2fs_put_page(mpage, 1);
                goto recover_block;
        }

        /* write target block */
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
        memcpy(page_address(fio.encrypted_page),
                                page_address(mpage), PAGE_SIZE);
        f2fs_put_page(mpage, 1);
        invalidate_mapping_pages(META_MAPPING(fio.sbi),
                                fio.old_blkaddr, fio.old_blkaddr);
        f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

        set_page_dirty(fio.encrypted_page);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);
        ClearPageError(page);

        fio.op = REQ_OP_WRITE;
        fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_write(&fio);
        if (fio.retry) {
                err = -EAGAIN;
                if (PageWriteback(fio.encrypted_page))
                        end_page_writeback(fio.encrypted_page);
                goto put_page_out;
        }

        f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
recover_block:
        if (err)
                f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                        true, true, true);
up_out:
        if (lfs_mode)
                f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
        return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
                                                        unsigned int segno, int off)
{
        struct page *page;
        int err = 0;

        page = f2fs_get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return PTR_ERR(page);

        if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
                err = -ENOENT;
                goto out;
        }

        err = f2fs_gc_pinned_control(inode, gc_type, segno);
        if (err)
                goto out;

        if (gc_type == BG_GC) {
                if (PageWriteback(page)) {
                        err = -EAGAIN;
                        goto out;
                }
                set_page_dirty(page);
                set_page_private_gcing(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .ino = inode->i_ino,
                        .type = DATA,
                        .temp = COLD,
                        .op = REQ_OP_WRITE,
                        .op_flags = REQ_SYNC,
                        .old_blkaddr = NULL_ADDR,
                        .page = page,
                        .encrypted_page = NULL,
                        .need_lock = LOCK_REQ,
                        .io_type = FS_GC_DATA_IO,
                };
                bool is_dirty = PageDirty(page);

retry:
                f2fs_wait_on_page_writeback(page, DATA, true, true);

                set_page_dirty(page);
                if (clear_page_dirty_for_io(page)) {
                        inode_dec_dirty_pages(inode);
                        f2fs_remove_dirty_inode(inode);
                }

                set_page_private_gcing(page);

                err = f2fs_do_write_data_page(&fio);
                if (err) {
                        clear_page_private_gcing(page);
                        if (err == -ENOMEM) {
                                memalloc_retry_wait(GFP_NOFS);
                                goto retry;
                        }
                        if (is_dirty)
                                set_page_dirty(page);
                }
        }
out:
        f2fs_put_page(page, 1);
        return err;
}

1451/*
1452 * This function tries to get parent node of victim data block, and identifies
1453 * data block validity. If the block is valid, copy that with cold status and
1454 * modify parent node.
1455 * If the parent node is not valid or the data block address is different,
1456 * the victim data block is ignored.
1457 */
1458static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1459                struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1460                bool force_migrate)
1461{
1462        struct super_block *sb = sbi->sb;
1463        struct f2fs_summary *entry;
1464        block_t start_addr;
1465        int off;
1466        int phase = 0;
1467        int submitted = 0;
1468        unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1469
1470        start_addr = START_BLOCK(sbi, segno);
1471
1472next_step:
1473        entry = sum;
1474
1475        for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1476                struct page *data_page;
1477                struct inode *inode;
1478                struct node_info dni; /* dnode info for the data */
1479                unsigned int ofs_in_node, nofs;
1480                block_t start_bidx;
1481                nid_t nid = le32_to_cpu(entry->nid);
1482
1483                /*
1484                 * stop BG_GC if there are not enough free sections.
1485                 * Also, stop GC if the segment has become fully valid due
1486                 * to a race with SSR block allocation.
1487                 */
1488                if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1489                        (!force_migrate && get_valid_blocks(sbi, segno, true) ==
1490                                                        BLKS_PER_SEC(sbi)))
1491                        return submitted;
1492
1493                if (check_valid_map(sbi, segno, off) == 0)
1494                        continue;
1495
1496                if (phase == 0) {
1497                        f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1498                                                        META_NAT, true);
1499                        continue;
1500                }
1501
1502                if (phase == 1) {
1503                        f2fs_ra_node_page(sbi, nid);
1504                        continue;
1505                }
1506
1507                /* Get an inode by ino, checking its validity */
1508                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1509                        continue;
1510
1511                if (phase == 2) {
1512                        f2fs_ra_node_page(sbi, dni.ino);
1513                        continue;
1514                }
1515
1516                ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1517
1518                if (phase == 3) {
1519                        int err;
1520
1521                        inode = f2fs_iget(sb, dni.ino);
1522                        if (IS_ERR(inode) || is_bad_inode(inode) ||
1523                                        special_file(inode->i_mode))
1524                                continue;
1525
1526                        err = f2fs_gc_pinned_control(inode, gc_type, segno);
1527                        if (err == -EAGAIN) {
1528                                iput(inode);
1529                                return submitted;
1530                        }
1531
1532                        if (!f2fs_down_write_trylock(
1533                                &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1534                                iput(inode);
1535                                sbi->skipped_gc_rwsem++;
1536                                continue;
1537                        }
1538
1539                        start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1540                                                                ofs_in_node;
1541
1542                        if (f2fs_post_read_required(inode)) {
1543                                int err = ra_data_block(inode, start_bidx);
1544
1545                                f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1546                                if (err) {
1547                                        iput(inode);
1548                                        continue;
1549                                }
1550                                add_gc_inode(gc_list, inode);
1551                                continue;
1552                        }
1553
1554                        data_page = f2fs_get_read_data_page(inode,
1555                                                start_bidx, REQ_RAHEAD, true);
1556                        f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1557                        if (IS_ERR(data_page)) {
1558                                iput(inode);
1559                                continue;
1560                        }
1561
1562                        f2fs_put_page(data_page, 0);
1563                        add_gc_inode(gc_list, inode);
1564                        continue;
1565                }
1566
1567                /* phase 4 */
1568                inode = find_gc_inode(gc_list, dni.ino);
1569                if (inode) {
1570                        struct f2fs_inode_info *fi = F2FS_I(inode);
1571                        bool locked = false;
1572                        int err;
1573
1574                        if (S_ISREG(inode->i_mode)) {
1575                                if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
1576                                        sbi->skipped_gc_rwsem++;
1577                                        continue;
1578                                }
1579                                if (!f2fs_down_write_trylock(
1580                                                &fi->i_gc_rwsem[WRITE])) {
1581                                        sbi->skipped_gc_rwsem++;
1582                                        f2fs_up_write(&fi->i_gc_rwsem[READ]);
1583                                        continue;
1584                                }
1585                                locked = true;
1586
1587                                /* wait for all in-flight AIO data */
1588                                inode_dio_wait(inode);
1589                        }
1590
1591                        start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1592                                                                + ofs_in_node;
1593                        if (f2fs_post_read_required(inode))
1594                                err = move_data_block(inode, start_bidx,
1595                                                        gc_type, segno, off);
1596                        else
1597                                err = move_data_page(inode, start_bidx, gc_type,
1598                                                                segno, off);
1599
1600                        if (!err && (gc_type == FG_GC ||
1601                                        f2fs_post_read_required(inode)))
1602                                submitted++;
1603
1604                        if (locked) {
1605                                f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1606                                f2fs_up_write(&fi->i_gc_rwsem[READ]);
1607                        }
1608
1609                        stat_inc_data_blk_count(sbi, 1, gc_type);
1610                }
1611        }
1612
1613        if (++phase < 5)
1614                goto next_step;
1615
1616        return submitted;
1617}
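
/*
 * For reference, the phase loop above makes five passes over the summary
 * entries. A hypothetical enum naming the passes (illustrative only; the
 * mainline code uses a bare int):
 */
enum gc_data_phase {
        GC_DATA_RA_NAT = 0,     /* readahead NAT blocks for each nid */
        GC_DATA_RA_NODE,        /* readahead the dnode pages */
        GC_DATA_RA_INODE,       /* readahead inode pages of live blocks */
        GC_DATA_IGET,           /* iget inodes, readahead/ref data pages */
        GC_DATA_MOVE,           /* actually move the data blocks */
};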
1618
1619static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1620                        int gc_type)
1621{
1622        struct sit_info *sit_i = SIT_I(sbi);
1623        int ret;
1624
1625        down_write(&sit_i->sentry_lock);
1626        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
1627                                              NO_CHECK_TYPE, LFS, 0);
1628        up_write(&sit_i->sentry_lock);
1629        return ret;
1630}
1631
1632static int do_garbage_collect(struct f2fs_sb_info *sbi,
1633                                unsigned int start_segno,
1634                                struct gc_inode_list *gc_list, int gc_type,
1635                                bool force_migrate)
1636{
1637        struct page *sum_page;
1638        struct f2fs_summary_block *sum;
1639        struct blk_plug plug;
1640        unsigned int segno = start_segno;
1641        unsigned int end_segno = start_segno + sbi->segs_per_sec;
1642        int seg_freed = 0, migrated = 0;
1643        unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1644                                                SUM_TYPE_DATA : SUM_TYPE_NODE;
1645        int submitted = 0;
1646
1647        if (__is_large_section(sbi))
1648                end_segno = rounddown(end_segno, sbi->segs_per_sec);
1649
1650        /*
1651         * zone-capacity can be less than zone-size on zoned devices,
1652         * leaving fewer usable segments in the zone than expected;
1653         * calculate the last segno in the zone that can be garbage collected
1654         */
1655        if (f2fs_sb_has_blkzoned(sbi))
1656                end_segno -= sbi->segs_per_sec -
1657                                        f2fs_usable_segs_in_sec(sbi, segno);
1658
1659        sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1660
1661        /* readahead multiple SSA blocks that have contiguous addresses */
1662        if (__is_large_section(sbi))
1663                f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1664                                        end_segno - segno, META_SSA, true);
1665
1666        /* take a reference on every summary page */
1667        while (segno < end_segno) {
1668                sum_page = f2fs_get_sum_page(sbi, segno++);
1669                if (IS_ERR(sum_page)) {
1670                        int err = PTR_ERR(sum_page);
1671
1672                        end_segno = segno - 1;
1673                        for (segno = start_segno; segno < end_segno; segno++) {
1674                                sum_page = find_get_page(META_MAPPING(sbi),
1675                                                GET_SUM_BLOCK(sbi, segno));
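                                /* drop both the find_get_page() reference
                                 * and the one left by f2fs_get_sum_page() */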
1676                                f2fs_put_page(sum_page, 0);
1677                                f2fs_put_page(sum_page, 0);
1678                        }
1679                        return err;
1680                }
1681                unlock_page(sum_page);
1682        }
1683
1684        blk_start_plug(&plug);
1685
1686        for (segno = start_segno; segno < end_segno; segno++) {
1687
1688                /* find segment summary of victim */
1689                sum_page = find_get_page(META_MAPPING(sbi),
1690                                        GET_SUM_BLOCK(sbi, segno));
1691                f2fs_put_page(sum_page, 0);
1692
1693                if (get_valid_blocks(sbi, segno, false) == 0)
1694                        goto freed;
1695                if (gc_type == BG_GC && __is_large_section(sbi) &&
1696                                migrated >= sbi->migration_granularity)
1697                        goto skip;
1698                if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1699                        goto skip;
1700
1701                sum = page_address(sum_page);
1702                if (type != GET_SUM_TYPE((&sum->footer))) {
1703                        f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1704                                 segno, type, GET_SUM_TYPE((&sum->footer)));
1705                        set_sbi_flag(sbi, SBI_NEED_FSCK);
1706                        f2fs_stop_checkpoint(sbi, false);
1707                        goto skip;
1708                }
1709
1710                /*
1711                 * this is to avoid deadlock:
1712                 * - lock_page(sum_page)         - f2fs_replace_block
1713                 *  - check_valid_map()            - down_write(sentry_lock)
1714                 *   - down_read(sentry_lock)     - change_curseg()
1715                 *                                  - lock_page(sum_page)
1716                 */
1717                if (type == SUM_TYPE_NODE)
1718                        submitted += gc_node_segment(sbi, sum->entries, segno,
1719                                                                gc_type);
1720                else
1721                        submitted += gc_data_segment(sbi, sum->entries, gc_list,
1722                                                        segno, gc_type,
1723                                                        force_migrate);
1724
1725                stat_inc_seg_count(sbi, type, gc_type);
1726                sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1727                migrated++;
1728
1729freed:
1730                if (gc_type == FG_GC &&
1731                                get_valid_blocks(sbi, segno, false) == 0)
1732                        seg_freed++;
1733
1734                if (__is_large_section(sbi) && segno + 1 < end_segno)
1735                        sbi->next_victim_seg[gc_type] = segno + 1;
1736skip:
1737                f2fs_put_page(sum_page, 0);
1738        }
1739
1740        if (submitted)
1741                f2fs_submit_merged_write(sbi,
1742                                (type == SUM_TYPE_NODE) ? NODE : DATA);
1743
1744        blk_finish_plug(&plug);
1745
1746        stat_inc_call_count(sbi->stat_info);
1747
1748        return seg_freed;
1749}
1750
1751int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1752{
1753        int gc_type = gc_control->init_gc_type;
1754        unsigned int segno = gc_control->victim_segno;
1755        int sec_freed = 0, seg_freed = 0, total_freed = 0;
1756        int ret = 0;
1757        struct cp_control cpc;
1758        struct gc_inode_list gc_list = {
1759                .ilist = LIST_HEAD_INIT(gc_list.ilist),
1760                .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1761        };
1762        unsigned int skipped_round = 0, round = 0;
1763
1764        trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1765                                gc_control->nr_free_secs,
1766                                get_pages(sbi, F2FS_DIRTY_NODES),
1767                                get_pages(sbi, F2FS_DIRTY_DENTS),
1768                                get_pages(sbi, F2FS_DIRTY_IMETA),
1769                                free_sections(sbi),
1770                                free_segments(sbi),
1771                                reserved_segments(sbi),
1772                                prefree_segments(sbi));
1773
1774        cpc.reason = __get_cp_reason(sbi);
1775        sbi->skipped_gc_rwsem = 0;
1776gc_more:
1777        if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1778                ret = -EINVAL;
1779                goto stop;
1780        }
1781        if (unlikely(f2fs_cp_error(sbi))) {
1782                ret = -EIO;
1783                goto stop;
1784        }
1785
1786        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
1787                /*
1788                 * For example, if there are many prefree segments below the
1789                 * given threshold, we can free them via checkpoint. Then we
1790                 * secure enough free segments and no longer need FG_GC.
1791                 */
1792                if (prefree_segments(sbi)) {
1793                        ret = f2fs_write_checkpoint(sbi, &cpc);
1794                        if (ret)
1795                                goto stop;
1796                }
1797                if (has_not_enough_free_secs(sbi, 0, 0))
1798                        gc_type = FG_GC;
1799        }
1800
1801        /* f2fs_balance_fs() doesn't need to do BG_GC in its critical path. */
1802        if (gc_type == BG_GC && gc_control->no_bg_gc) {
1803                ret = -EINVAL;
1804                goto stop;
1805        }
1806retry:
1807        ret = __get_victim(sbi, &segno, gc_type);
1808        if (ret) {
1809                /* allow searching for victims in sections that have pinned data */
1810                if (ret == -ENODATA && gc_type == FG_GC &&
1811                                f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1812                        f2fs_unpin_all_sections(sbi, false);
1813                        goto retry;
1814                }
1815                goto stop;
1816        }
1817
1818        seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1819                                gc_control->should_migrate_blocks);
1820        total_freed += seg_freed;
1821
1822        if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
1823                sec_freed++;
1824
1825        if (gc_type == FG_GC)
1826                sbi->cur_victim_sec = NULL_SEGNO;
1827
1828        if (gc_control->init_gc_type == FG_GC ||
1829            !has_not_enough_free_secs(sbi,
1830                                (gc_type == FG_GC) ? sec_freed : 0, 0)) {
1831                if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
1832                        goto go_gc_more;
1833                goto stop;
1834        }
1835
1836        /* FG_GC stops GC based on the skip count */
1837        if (gc_type == FG_GC) {
1838                if (sbi->skipped_gc_rwsem)
1839                        skipped_round++;
1840                round++;
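                /*
                 * if most rounds made no progress because some inode's
                 * i_gc_rwsem could not be taken, fall back to a
                 * checkpoint rather than spinning in GC
                 */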
1841                if (skipped_round > MAX_SKIP_GC_COUNT &&
1842                                skipped_round * 2 >= round) {
1843                        ret = f2fs_write_checkpoint(sbi, &cpc);
1844                        goto stop;
1845                }
1846        }
1847
1848        /* Write checkpoint to reclaim prefree segments */
1849        if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
1850                                prefree_segments(sbi)) {
1851                ret = f2fs_write_checkpoint(sbi, &cpc);
1852                if (ret)
1853                        goto stop;
1854        }
1855go_gc_more:
1856        segno = NULL_SEGNO;
1857        goto gc_more;
1858
1859stop:
1860        SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1861        SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1862
1863        if (gc_type == FG_GC)
1864                f2fs_unpin_all_sections(sbi, true);
1865
1866        trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1867                                get_pages(sbi, F2FS_DIRTY_NODES),
1868                                get_pages(sbi, F2FS_DIRTY_DENTS),
1869                                get_pages(sbi, F2FS_DIRTY_IMETA),
1870                                free_sections(sbi),
1871                                free_segments(sbi),
1872                                reserved_segments(sbi),
1873                                prefree_segments(sbi));
1874
1875        f2fs_up_write(&sbi->gc_lock);
1876
1877        put_gc_inode(&gc_list);
1878
1879        if (gc_control->err_gc_skipped && !ret)
1880                ret = sec_freed ? 0 : -EAGAIN;
1881        return ret;
1882}
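
/*
 * Sketch of a typical caller (hypothetical, for illustration): f2fs_gc()
 * releases sbi->gc_lock on its stop: path above, so the caller takes the
 * lock before the call. The field values shown are assumptions, not
 * defaults.
 */
static int demand_foreground_gc(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_control gc_control = {
                .victim_segno = NULL_SEGNO,
                .init_gc_type = FG_GC,
                .no_bg_gc = false,
                .should_migrate_blocks = false,
                .err_gc_skipped = true,
                .nr_free_secs = 1,
        };

        f2fs_down_write(&sbi->gc_lock);
        return f2fs_gc(sbi, &gc_control);       /* drops gc_lock for us */
}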
1883
1884int __init f2fs_create_garbage_collection_cache(void)
1885{
1886        victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1887                                        sizeof(struct victim_entry));
1888        if (!victim_entry_slab)
1889                return -ENOMEM;
1890        return 0;
1891}
1892
1893void f2fs_destroy_garbage_collection_cache(void)
1894{
1895        kmem_cache_destroy(victim_entry_slab);
1896}
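
/*
 * A minimal sketch of how this create/destroy pair is meant to be used
 * from module init/exit paths (hypothetical caller; the names below are
 * made up):
 */
static int __init example_module_init(void)
{
        int err = f2fs_create_garbage_collection_cache();

        if (err)
                return err;
        /* ... create further caches, unwinding with the destroy helper
         * if a later step fails ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        f2fs_destroy_garbage_collection_cache();
}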
1897
1898static void init_atgc_management(struct f2fs_sb_info *sbi)
1899{
1900        struct atgc_management *am = &sbi->am;
1901
1902        if (test_opt(sbi, ATGC) &&
1903                SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1904                am->atgc_enabled = true;
1905
1906        am->root = RB_ROOT_CACHED;
1907        INIT_LIST_HEAD(&am->victim_list);
1908        am->victim_count = 0;
1909
1910        am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1911        am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1912        am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
1913        am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
1914}
1915
1916void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1917{
1918        DIRTY_I(sbi)->v_ops = &default_v_ops;
1919
1920        sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1921
1922        /* place the warm/cold data area on the slower device */
1923        if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1924                SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1925                                GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1926
1927        init_atgc_management(sbi);
1928}
1929
1930static int free_segment_range(struct f2fs_sb_info *sbi,
1931                                unsigned int secs, bool gc_only)
1932{
1933        unsigned int segno, next_inuse, start, end;
1934        struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1935        int gc_mode, gc_type;
1936        int err = 0;
1937        int type;
1938
1939        /* Force block allocation for GC */
1940        MAIN_SECS(sbi) -= secs;
1941        start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1942        end = MAIN_SEGS(sbi) - 1;
1943
1944        mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1945        for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1946                if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1947                        SIT_I(sbi)->last_victim[gc_mode] = 0;
1948
1949        for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1950                if (sbi->next_victim_seg[gc_type] >= start)
1951                        sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1952        mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
1953
1954        /* Move out cursegs from the target range */
1955        for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
1956                f2fs_allocate_segment_for_resize(sbi, type, start, end);
1957
1958        /* do GC to move out valid blocks in the range */
1959        for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1960                struct gc_inode_list gc_list = {
1961                        .ilist = LIST_HEAD_INIT(gc_list.ilist),
1962                        .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1963                };
1964
1965                do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
1966                put_gc_inode(&gc_list);
1967
1968                if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1969                        err = -EAGAIN;
1970                        goto out;
1971                }
1972                if (fatal_signal_pending(current)) {
1973                        err = -ERESTARTSYS;
1974                        goto out;
1975                }
1976        }
1977        if (gc_only)
1978                goto out;
1979
1980        err = f2fs_write_checkpoint(sbi, &cpc);
1981        if (err)
1982                goto out;
1983
1984        next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1985        if (next_inuse <= end) {
1986                f2fs_err(sbi, "segno %u should be free but still inuse!",
1987                         next_inuse);
1988                f2fs_bug_on(sbi, 1);
1989        }
1990out:
1991        MAIN_SECS(sbi) += secs;
1992        return err;
1993}
1994
1995static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1996{
1997        struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
1998        int section_count;
1999        int segment_count;
2000        int segment_count_main;
2001        long long block_count;
2002        int segs = secs * sbi->segs_per_sec;
2003
2004        f2fs_down_write(&sbi->sb_lock);
2005
2006        section_count = le32_to_cpu(raw_sb->section_count);
2007        segment_count = le32_to_cpu(raw_sb->segment_count);
2008        segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2009        block_count = le64_to_cpu(raw_sb->block_count);
2010
2011        raw_sb->section_count = cpu_to_le32(section_count + secs);
2012        raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2013        raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2014        raw_sb->block_count = cpu_to_le64(block_count +
2015                                        (long long)segs * sbi->blocks_per_seg);
2016        if (f2fs_is_multi_device(sbi)) {
2017                int last_dev = sbi->s_ndevs - 1;
2018                int dev_segs =
2019                        le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2020
2021                raw_sb->devs[last_dev].total_segments =
2022                                                cpu_to_le32(dev_segs + segs);
2023        }
2024
2025        f2fs_up_write(&sbi->sb_lock);
2026}
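
/*
 * update_sb_metadata() follows the usual on-disk endianness discipline:
 * convert each __le32/__le64 field to CPU order, adjust it, and convert
 * it back. The same pattern in isolation (hypothetical helper; the
 * kernel also provides le32_add_cpu() for exactly this):
 */
static void le32_add(__le32 *field, int delta)
{
        u32 v = le32_to_cpu(*field);

        *field = cpu_to_le32(v + delta);
}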
2027
2028static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2029{
2030        int segs = secs * sbi->segs_per_sec;
2031        long long blks = (long long)segs * sbi->blocks_per_seg;
2032        long long user_block_count =
2033                                le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2034
2035        SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2036        MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2037        MAIN_SECS(sbi) += secs;
2038        FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2039        FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2040        F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2041
2042        if (f2fs_is_multi_device(sbi)) {
2043                int last_dev = sbi->s_ndevs - 1;
2044
2045                FDEV(last_dev).total_segments =
2046                                (int)FDEV(last_dev).total_segments + segs;
2047                FDEV(last_dev).end_blk =
2048                                (long long)FDEV(last_dev).end_blk + blks;
2049#ifdef CONFIG_BLK_DEV_ZONED
2050                FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
2051                                        (int)(blks >> sbi->log_blocks_per_blkz);
2052#endif
2053        }
2054}
2055
2056int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
2057{
2058        __u64 old_block_count, shrunk_blocks;
2059        struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2060        unsigned int secs;
2061        int err = 0;
2062        __u32 rem;
2063
2064        old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2065        if (block_count > old_block_count)
2066                return -EINVAL;
2067
2068        if (f2fs_is_multi_device(sbi)) {
2069                int last_dev = sbi->s_ndevs - 1;
2070                __u64 last_segs = FDEV(last_dev).total_segments;
2071
2072                if (block_count + last_segs * sbi->blocks_per_seg <=
2073                                                                old_block_count)
2074                        return -EINVAL;
2075        }
2076
2077        /* the new fs size should be aligned to the section size */
2078        div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2079        if (rem)
2080                return -EINVAL;
2081
2082        if (block_count == old_block_count)
2083                return 0;
2084
2085        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2086                f2fs_err(sbi, "Should run fsck to repair first.");
2087                return -EFSCORRUPTED;
2088        }
2089
2090        if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2091                f2fs_err(sbi, "Checkpoint should be enabled.");
2092                return -EINVAL;
2093        }
2094
2095        shrunk_blocks = old_block_count - block_count;
2096        secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2097
2098        /* stop other GC */
2099        if (!f2fs_down_write_trylock(&sbi->gc_lock))
2100                return -EAGAIN;
2101
2102        /* stop CP to protect MAIN_SEC in free_segment_range */
2103        f2fs_lock_op(sbi);
2104
2105        spin_lock(&sbi->stat_lock);
2106        if (shrunk_blocks + valid_user_blocks(sbi) +
2107                sbi->current_reserved_blocks + sbi->unusable_block_count +
2108                F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2109                err = -ENOSPC;
2110        spin_unlock(&sbi->stat_lock);
2111
2112        if (err)
2113                goto out_unlock;
2114
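        /* first pass with gc_only=true: just migrate data out of the
         * target range; the validity check and checkpoint come later */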
2115        err = free_segment_range(sbi, secs, true);
2116
2117out_unlock:
2118        f2fs_unlock_op(sbi);
2119        f2fs_up_write(&sbi->gc_lock);
2120        if (err)
2121                return err;
2122
2123        set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2124
2125        freeze_super(sbi->sb);
2126        f2fs_down_write(&sbi->gc_lock);
2127        f2fs_down_write(&sbi->cp_global_sem);
2128
2129        spin_lock(&sbi->stat_lock);
2130        if (shrunk_blocks + valid_user_blocks(sbi) +
2131                sbi->current_reserved_blocks + sbi->unusable_block_count +
2132                F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2133                err = -ENOSPC;
2134        else
2135                sbi->user_block_count -= shrunk_blocks;
2136        spin_unlock(&sbi->stat_lock);
2137        if (err)
2138                goto out_err;
2139
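        /* second pass with gc_only=false: require the range to be empty
         * and write a checkpoint to commit the shrink */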
2140        err = free_segment_range(sbi, secs, false);
2141        if (err)
2142                goto recover_out;
2143
2144        update_sb_metadata(sbi, -secs);
2145
2146        err = f2fs_commit_super(sbi, false);
2147        if (err) {
2148                update_sb_metadata(sbi, secs);
2149                goto recover_out;
2150        }
2151
2152        update_fs_metadata(sbi, -secs);
2153        clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2154        set_sbi_flag(sbi, SBI_IS_DIRTY);
2155
2156        err = f2fs_write_checkpoint(sbi, &cpc);
2157        if (err) {
2158                update_fs_metadata(sbi, secs);
2159                update_sb_metadata(sbi, secs);
2160                f2fs_commit_super(sbi, false);
2161        }
2162recover_out:
2163        if (err) {
2164                set_sbi_flag(sbi, SBI_NEED_FSCK);
2165                f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2166
2167                spin_lock(&sbi->stat_lock);
2168                sbi->user_block_count += shrunk_blocks;
2169                spin_unlock(&sbi->stat_lock);
2170        }
2171out_err:
2172        f2fs_up_write(&sbi->cp_global_sem);
2173        f2fs_up_write(&sbi->gc_lock);
2174        thaw_super(sbi->sb);
2175        clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2176        return err;
2177}
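
/*
 * f2fs_resize_fs() rejects a target size that is not section-aligned
 * (see the div_u64_rem() check above). A caller-side sketch that rounds
 * a requested block count down to a section boundary (hypothetical
 * helper, illustrative only):
 */
static __u64 align_down_to_section(struct f2fs_sb_info *sbi,
                                        __u64 block_count)
{
        __u32 rem;

        div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
        return block_count - rem;
}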
2178