linux/fs/f2fs/extent_cache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}
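
/*
 * Usage sketch for f2fs_lookup_rb_tree(), modelled on the caller in
 * f2fs_lookup_extent_tree() below; the cached entry acts as a
 * one-element lookup cache, so repeated queries near the same offset
 * skip the rb-tree walk:
 *
 *	re = f2fs_lookup_rb_tree(&et->root,
 *			(struct rb_entry *)et->cached_en, pgofs);
 *
 * On a hit, [re->ofs, re->ofs + re->len) covers pgofs; on a miss the
 * function returns NULL.
 */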

struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
					struct rb_root_cached *root,
					struct rb_node **parent,
					unsigned long long key, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (key < re->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			*leftmost = false;
		}
	}

	return p;
}

struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return p;
}

/*
 * Look up the rb entry covering @ofs in the rb-tree; on a hit return
 * the entry, otherwise return NULL.
 * @prev_entry: extent before ofs
 * @next_entry: extent after ofs
 * @insert_p: insert point for a new extent at ofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}
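
/*
 * Example of the contract above, modelled on the call in
 * f2fs_update_extent_tree_range() below. On a miss, *prev_entry and
 * *next_entry bracket @ofs, and (*insert_p, *insert_parent) form a
 * ready-made link point that stays valid only while the tree is
 * unchanged:
 *
 *	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
 *				(struct rb_entry *)et->cached_en, fofs,
 *				(struct rb_entry **)&prev_en,
 *				(struct rb_entry **)&next_en,
 *				&insert_p, &insert_parent, false,
 *				&leftmost);
 */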

bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (check_key) {
			if (cur_re->key > next_re->key) {
				f2fs_info(sbi, "inconsistent rbtree, "
					"cur(%llu) next(%llu)",
					cur_re->key, next_re->key);
				return false;
			}
			goto next;
		}

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
				  cur_re->ofs, cur_re->len,
				  next_re->ofs, next_re->len);
			return false;
		}
next:
		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}
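
/*
 * Note: __grab_extent_tree() is a lookup-or-create helper keyed by the
 * inode number; a tree found on the zombie list is revived by taking it
 * back off that list. A minimal caller sketch:
 *
 *	struct extent_tree *et = __grab_extent_tree(inode);
 *
 * The result stays pinned in F2FS_I(inode)->extent_tree, so the pointer
 * remains valid for the lifetime of the in-memory inode.
 */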

static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}
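
/*
 * Worked example for __drop_largest_extent() (numbers illustrative):
 * with largest = {fofs = 100, len = 10}, i.e. covering [100, 110), an
 * update of fofs = 105, len = 20 covers [105, 125). Since 105 < 110 and
 * 125 > 100, the ranges overlap, so the cached largest extent is
 * invalidated (len = 0) and largest_updated is set so that the inode is
 * marked dirty once the tree lock is dropped.
 */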

/* may dirty the inode page by dropping the on-disk largest extent */
static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		return;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}

void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	__f2fs_init_extent_tree(inode, ipage);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
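
/*
 * Lookup order above: the largest-extent shortcut is tried first, then
 * the one-entry cache (cached_en), then a full rb-tree walk; the
 * stat_inc_*_hit() counters record which level answered. A hit also
 * moves the node to the tail of the global LRU list, so the shrinker
 * reclaims it last.
 */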

static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}
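
/*
 * Worked merge example (values illustrative): with prev_ex->ei =
 * {fofs 0, blk 100, len 4}, ei = {fofs 4, blk 104, len 2} and
 * next_ex->ei = {fofs 6, blk 106, len 3}, the new extent is
 * back-mergeable into prev_ex (offsets and blocks are both contiguous),
 * growing it to len 6; that result is in turn front-mergeable into
 * next_ex, which absorbs it as {fofs 0, blk 100, len 9} while prev_ex
 * is released. One node covering [0, 9) remains.
 */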

static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if the original extent is split into zero or two parts,
		 * the extent tree has been altered by deletion or insertion,
		 * therefore invalidate pointers into the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
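
/*
 * Worked split example for step 2 above (values illustrative, assuming
 * F2FS_MIN_EXTENT_LEN == 64, its in-tree value): an existing node
 * covers [0, 192) and the update rewrites [64, 128). Both remainders
 * [0, 64) and [128, 192) reach F2FS_MIN_EXTENT_LEN, so the node is
 * trimmed to [0, 64) (parts = 1) and a new node is inserted for
 * [128, 192) (parts = 2); since parts != 1, the cached insert position
 * is invalidated. A remainder shorter than F2FS_MIN_EXTENT_LEN is
 * dropped instead of being kept as a tiny extent.
 */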

unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}
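
/*
 * Shrinker flow recap: pass 1 frees whole zombie trees (inodes already
 * evicted), pass 2 trims nodes from the cold end of the global LRU
 * list. Both passes take locks with trylock so reclaim never blocks on
 * cache users; on contention a node is simply rotated to the LRU tail
 * and skipped.
 */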

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (et->largest.len) {
		et->largest.len = 0;
		updated = true;
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
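
/*
 * Sketch of the fofs computation above: f2fs_start_bidx_of_node()
 * returns the file offset of the first data block addressed by the
 * dnode page, so adding dn->ofs_in_node yields the file offset that
 * backs dn->data_blkaddr; that single block is then pushed into the
 * extent tree. NEW_ADDR (an address reserved for newly allocated,
 * not-yet-written blocks) is cached as NULL_ADDR so it is not mistaken
 * for a valid mapping.
 */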

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}