linux/fs/f2fs/compress.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * f2fs compress support
   4 *
   5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
   6 */
   7
   8#include <linux/fs.h>
   9#include <linux/f2fs_fs.h>
  10#include <linux/moduleparam.h>
  11#include <linux/writeback.h>
  12#include <linux/backing-dev.h>
  13#include <linux/lzo.h>
  14#include <linux/lz4.h>
  15#include <linux/zstd.h>
  16#include <linux/pagevec.h>
  17
  18#include "f2fs.h"
  19#include "node.h"
  20#include "segment.h"
  21#include <trace/events/f2fs.h>
  22
  23static struct kmem_cache *cic_entry_slab;
  24static struct kmem_cache *dic_entry_slab;
  25
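/*
 * rpages/cpages pointer arrays are allocated from a per-sb slab when the
 * request fits sbi->page_array_slab_size, otherwise from f2fs_kzalloc().
 */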
  26static void *page_array_alloc(struct inode *inode, int nr)
  27{
  28        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  29        unsigned int size = sizeof(struct page *) * nr;
  30
  31        if (likely(size <= sbi->page_array_slab_size))
  32                return f2fs_kmem_cache_alloc(sbi->page_array_slab,
  33                                        GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
  34        return f2fs_kzalloc(sbi, size, GFP_NOFS);
  35}
  36
  37static void page_array_free(struct inode *inode, void *pages, int nr)
  38{
  39        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  40        unsigned int size = sizeof(struct page *) * nr;
  41
  42        if (!pages)
  43                return;
  44
  45        if (likely(size <= sbi->page_array_slab_size))
  46                kmem_cache_free(sbi->page_array_slab, pages);
  47        else
  48                kfree(pages);
  49}
  50
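/*
 * Per-algorithm compress/decompress hooks.  The init/destroy callbacks are
 * optional and may be left NULL.
 */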
  51struct f2fs_compress_ops {
  52        int (*init_compress_ctx)(struct compress_ctx *cc);
  53        void (*destroy_compress_ctx)(struct compress_ctx *cc);
  54        int (*compress_pages)(struct compress_ctx *cc);
  55        int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
  56        void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
  57        int (*decompress_pages)(struct decompress_io_ctx *dic);
  58};
  59
  60static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
  61{
  62        return index & (cc->cluster_size - 1);
  63}
  64
  65static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
  66{
  67        return index >> cc->log_cluster_size;
  68}
  69
  70static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
  71{
  72        return cc->cluster_idx << cc->log_cluster_size;
  73}
  74
  75bool f2fs_is_compressed_page(struct page *page)
  76{
  77        if (!PagePrivate(page))
  78                return false;
  79        if (!page_private(page))
  80                return false;
  81        if (page_private_nonpointer(page))
  82                return false;
  83
  84        f2fs_bug_on(F2FS_M_SB(page->mapping),
  85                *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
  86        return true;
  87}
  88
  89static void f2fs_set_compressed_page(struct page *page,
  90                struct inode *inode, pgoff_t index, void *data)
  91{
  92        attach_page_private(page, (void *)data);
  93
  94        /* i_crypto_info and iv index */
  95        page->index = index;
  96        page->mapping = inode->i_mapping;
  97}
  98
  99static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
 100{
 101        int i;
 102
 103        for (i = 0; i < len; i++) {
 104                if (!cc->rpages[i])
 105                        continue;
 106                if (unlock)
 107                        unlock_page(cc->rpages[i]);
 108                else
 109                        put_page(cc->rpages[i]);
 110        }
 111}
 112
 113static void f2fs_put_rpages(struct compress_ctx *cc)
 114{
 115        f2fs_drop_rpages(cc, cc->cluster_size, false);
 116}
 117
 118static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
 119{
 120        f2fs_drop_rpages(cc, len, true);
 121}
 122
 123static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
 124                struct writeback_control *wbc, bool redirty, int unlock)
 125{
 126        unsigned int i;
 127
 128        for (i = 0; i < cc->cluster_size; i++) {
 129                if (!cc->rpages[i])
 130                        continue;
 131                if (redirty)
 132                        redirty_page_for_writepage(wbc, cc->rpages[i]);
 133                f2fs_put_page(cc->rpages[i], unlock);
 134        }
 135}
 136
 137struct page *f2fs_compress_control_page(struct page *page)
 138{
 139        return ((struct compress_io_ctx *)page_private(page))->rpages[0];
 140}
 141
 142int f2fs_init_compress_ctx(struct compress_ctx *cc)
 143{
 144        if (cc->rpages)
 145                return 0;
 146
 147        cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
 148        return cc->rpages ? 0 : -ENOMEM;
 149}
 150
 151void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 152{
 153        page_array_free(cc->inode, cc->rpages, cc->cluster_size);
 154        cc->rpages = NULL;
 155        cc->nr_rpages = 0;
 156        cc->nr_cpages = 0;
 157        if (!reuse)
 158                cc->cluster_idx = NULL_CLUSTER;
 159}
 160
 161void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
 162{
 163        unsigned int cluster_ofs;
 164
 165        if (!f2fs_cluster_can_merge_page(cc, page->index))
 166                f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
 167
 168        cluster_ofs = offset_in_cluster(cc, page->index);
 169        cc->rpages[cluster_ofs] = page;
 170        cc->nr_rpages++;
 171        cc->cluster_idx = cluster_idx(cc, page->index);
 172}
 173
 174#ifdef CONFIG_F2FS_FS_LZO
 175static int lzo_init_compress_ctx(struct compress_ctx *cc)
 176{
 177        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
 178                                LZO1X_MEM_COMPRESS, GFP_NOFS);
 179        if (!cc->private)
 180                return -ENOMEM;
 181
 182        cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
 183        return 0;
 184}
 185
 186static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
 187{
 188        kvfree(cc->private);
 189        cc->private = NULL;
 190}
 191
 192static int lzo_compress_pages(struct compress_ctx *cc)
 193{
 194        int ret;
 195
 196        ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 197                                        &cc->clen, cc->private);
 198        if (ret != LZO_E_OK) {
 199                printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
 200                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
 201                return -EIO;
 202        }
 203        return 0;
 204}
 205
 206static int lzo_decompress_pages(struct decompress_io_ctx *dic)
 207{
 208        int ret;
 209
 210        ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
 211                                                dic->rbuf, &dic->rlen);
 212        if (ret != LZO_E_OK) {
 213                printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
 214                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
 215                return -EIO;
 216        }
 217
 218        if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
 219                printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
 220                                        "expected:%lu\n", KERN_ERR,
 221                                        F2FS_I_SB(dic->inode)->sb->s_id,
 222                                        dic->rlen,
 223                                        PAGE_SIZE << dic->log_cluster_size);
 224                return -EIO;
 225        }
 226        return 0;
 227}
 228
 229static const struct f2fs_compress_ops f2fs_lzo_ops = {
 230        .init_compress_ctx      = lzo_init_compress_ctx,
 231        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
 232        .compress_pages         = lzo_compress_pages,
 233        .decompress_pages       = lzo_decompress_pages,
 234};
 235#endif
 236
 237#ifdef CONFIG_F2FS_FS_LZ4
 238static int lz4_init_compress_ctx(struct compress_ctx *cc)
 239{
 240        unsigned int size = LZ4_MEM_COMPRESS;
 241
 242#ifdef CONFIG_F2FS_FS_LZ4HC
 243        if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
 244                size = LZ4HC_MEM_COMPRESS;
 245#endif
 246
 247        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
 248        if (!cc->private)
 249                return -ENOMEM;
 250
  251        /*
  252         * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
  253         * the worst compression case, because the lz4 compressor handles
  254         * the output budget properly on its own.
  255         */
 256        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 257        return 0;
 258}
 259
 260static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 261{
 262        kvfree(cc->private);
 263        cc->private = NULL;
 264}
 265
 266#ifdef CONFIG_F2FS_FS_LZ4HC
 267static int lz4hc_compress_pages(struct compress_ctx *cc)
 268{
 269        unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
 270                                                COMPRESS_LEVEL_OFFSET;
 271        int len;
 272
 273        if (level)
 274                len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 275                                        cc->clen, level, cc->private);
 276        else
 277                len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 278                                                cc->clen, cc->private);
 279        if (!len)
 280                return -EAGAIN;
 281
 282        cc->clen = len;
 283        return 0;
 284}
 285#endif
 286
 287static int lz4_compress_pages(struct compress_ctx *cc)
 288{
 289        int len;
 290
 291#ifdef CONFIG_F2FS_FS_LZ4HC
 292        return lz4hc_compress_pages(cc);
 293#endif
 294        len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 295                                                cc->clen, cc->private);
 296        if (!len)
 297                return -EAGAIN;
 298
 299        cc->clen = len;
 300        return 0;
 301}
 302
 303static int lz4_decompress_pages(struct decompress_io_ctx *dic)
 304{
 305        int ret;
 306
 307        ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
 308                                                dic->clen, dic->rlen);
 309        if (ret < 0) {
 310                printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
 311                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
 312                return -EIO;
 313        }
 314
 315        if (ret != PAGE_SIZE << dic->log_cluster_size) {
  316                printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
  317                                        "expected:%lu\n", KERN_ERR,
  318                                        F2FS_I_SB(dic->inode)->sb->s_id,
  319                                        ret,
 320                                        PAGE_SIZE << dic->log_cluster_size);
 321                return -EIO;
 322        }
 323        return 0;
 324}
 325
 326static const struct f2fs_compress_ops f2fs_lz4_ops = {
 327        .init_compress_ctx      = lz4_init_compress_ctx,
 328        .destroy_compress_ctx   = lz4_destroy_compress_ctx,
 329        .compress_pages         = lz4_compress_pages,
 330        .decompress_pages       = lz4_decompress_pages,
 331};
 332#endif
 333
 334#ifdef CONFIG_F2FS_FS_ZSTD
 335#define F2FS_ZSTD_DEFAULT_CLEVEL        1
 336
 337static int zstd_init_compress_ctx(struct compress_ctx *cc)
 338{
 339        zstd_parameters params;
 340        zstd_cstream *stream;
 341        void *workspace;
 342        unsigned int workspace_size;
 343        unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
 344                                                COMPRESS_LEVEL_OFFSET;
 345
 346        if (!level)
 347                level = F2FS_ZSTD_DEFAULT_CLEVEL;
 348
  349        params = zstd_get_params(level, cc->rlen);
 350        workspace_size = zstd_cstream_workspace_bound(&params.cParams);
 351
 352        workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
 353                                        workspace_size, GFP_NOFS);
 354        if (!workspace)
 355                return -ENOMEM;
 356
 357        stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
 358        if (!stream) {
 359                printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
 360                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 361                                __func__);
 362                kvfree(workspace);
 363                return -EIO;
 364        }
 365
 366        cc->private = workspace;
 367        cc->private2 = stream;
 368
 369        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 370        return 0;
 371}
 372
 373static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
 374{
 375        kvfree(cc->private);
 376        cc->private = NULL;
 377        cc->private2 = NULL;
 378}
 379
 380static int zstd_compress_pages(struct compress_ctx *cc)
 381{
 382        zstd_cstream *stream = cc->private2;
 383        zstd_in_buffer inbuf;
 384        zstd_out_buffer outbuf;
 385        int src_size = cc->rlen;
 386        int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 387        int ret;
 388
 389        inbuf.pos = 0;
 390        inbuf.src = cc->rbuf;
 391        inbuf.size = src_size;
 392
 393        outbuf.pos = 0;
 394        outbuf.dst = cc->cbuf->cdata;
 395        outbuf.size = dst_size;
 396
 397        ret = zstd_compress_stream(stream, &outbuf, &inbuf);
 398        if (zstd_is_error(ret)) {
 399                printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
 400                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 401                                __func__, zstd_get_error_code(ret));
 402                return -EIO;
 403        }
 404
 405        ret = zstd_end_stream(stream, &outbuf);
 406        if (zstd_is_error(ret)) {
 407                printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
 408                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 409                                __func__, zstd_get_error_code(ret));
 410                return -EIO;
 411        }
 412
  413        /*
  414         * compressed data remains in the intermediate buffer because there
  415         * is no more space left in cbuf.cdata
  416         */
 417        if (ret)
 418                return -EAGAIN;
 419
 420        cc->clen = outbuf.pos;
 421        return 0;
 422}
 423
 424static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 425{
 426        zstd_dstream *stream;
 427        void *workspace;
 428        unsigned int workspace_size;
 429        unsigned int max_window_size =
 430                        MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
 431
 432        workspace_size = zstd_dstream_workspace_bound(max_window_size);
 433
 434        workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
 435                                        workspace_size, GFP_NOFS);
 436        if (!workspace)
 437                return -ENOMEM;
 438
 439        stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
 440        if (!stream) {
 441                printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
 442                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
 443                                __func__);
 444                kvfree(workspace);
 445                return -EIO;
 446        }
 447
 448        dic->private = workspace;
 449        dic->private2 = stream;
 450
 451        return 0;
 452}
 453
 454static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
 455{
 456        kvfree(dic->private);
 457        dic->private = NULL;
 458        dic->private2 = NULL;
 459}
 460
 461static int zstd_decompress_pages(struct decompress_io_ctx *dic)
 462{
 463        zstd_dstream *stream = dic->private2;
 464        zstd_in_buffer inbuf;
 465        zstd_out_buffer outbuf;
 466        int ret;
 467
 468        inbuf.pos = 0;
 469        inbuf.src = dic->cbuf->cdata;
 470        inbuf.size = dic->clen;
 471
 472        outbuf.pos = 0;
 473        outbuf.dst = dic->rbuf;
 474        outbuf.size = dic->rlen;
 475
 476        ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
 477        if (zstd_is_error(ret)) {
 478                printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
 479                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
 480                                __func__, zstd_get_error_code(ret));
 481                return -EIO;
 482        }
 483
 484        if (dic->rlen != outbuf.pos) {
  485                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid out len:%zu, "
  486                                "expected:%zu\n", KERN_ERR,
  487                                F2FS_I_SB(dic->inode)->sb->s_id,
  488                                __func__, outbuf.pos,
  489                                dic->rlen);
 490                return -EIO;
 491        }
 492
 493        return 0;
 494}
 495
 496static const struct f2fs_compress_ops f2fs_zstd_ops = {
 497        .init_compress_ctx      = zstd_init_compress_ctx,
 498        .destroy_compress_ctx   = zstd_destroy_compress_ctx,
 499        .compress_pages         = zstd_compress_pages,
 500        .init_decompress_ctx    = zstd_init_decompress_ctx,
 501        .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
 502        .decompress_pages       = zstd_decompress_pages,
 503};
 504#endif
 505
 506#ifdef CONFIG_F2FS_FS_LZO
 507#ifdef CONFIG_F2FS_FS_LZORLE
 508static int lzorle_compress_pages(struct compress_ctx *cc)
 509{
 510        int ret;
 511
 512        ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 513                                        &cc->clen, cc->private);
 514        if (ret != LZO_E_OK) {
 515                printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
 516                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
 517                return -EIO;
 518        }
 519        return 0;
 520}
 521
 522static const struct f2fs_compress_ops f2fs_lzorle_ops = {
 523        .init_compress_ctx      = lzo_init_compress_ctx,
 524        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
 525        .compress_pages         = lzorle_compress_pages,
 526        .decompress_pages       = lzo_decompress_pages,
 527};
 528#endif
 529#endif
 530
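/*
 * Ops table indexed by F2FS_I(inode)->i_compress_algorithm; a NULL entry
 * means the corresponding algorithm is not built into the kernel.
 */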
 531static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
 532#ifdef CONFIG_F2FS_FS_LZO
 533        &f2fs_lzo_ops,
 534#else
 535        NULL,
 536#endif
 537#ifdef CONFIG_F2FS_FS_LZ4
 538        &f2fs_lz4_ops,
 539#else
 540        NULL,
 541#endif
 542#ifdef CONFIG_F2FS_FS_ZSTD
 543        &f2fs_zstd_ops,
 544#else
 545        NULL,
 546#endif
 547#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
 548        &f2fs_lzorle_ops,
 549#else
 550        NULL,
 551#endif
 552};
 553
 554bool f2fs_is_compress_backend_ready(struct inode *inode)
 555{
 556        if (!f2fs_compressed_file(inode))
 557                return true;
 558        return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
 559}
 560
 561static mempool_t *compress_page_pool;
 562static int num_compress_pages = 512;
 563module_param(num_compress_pages, uint, 0444);
 564MODULE_PARM_DESC(num_compress_pages,
 565                "Number of intermediate compress pages to preallocate");
 566
 567int f2fs_init_compress_mempool(void)
 568{
 569        compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
 570        if (!compress_page_pool)
 571                return -ENOMEM;
 572
 573        return 0;
 574}
 575
 576void f2fs_destroy_compress_mempool(void)
 577{
 578        mempool_destroy(compress_page_pool);
 579}
 580
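/* grab a locked scratch page from the compress mempool */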
 581static struct page *f2fs_compress_alloc_page(void)
 582{
 583        struct page *page;
 584
 585        page = mempool_alloc(compress_page_pool, GFP_NOFS);
 586        lock_page(page);
 587
 588        return page;
 589}
 590
 591static void f2fs_compress_free_page(struct page *page)
 592{
 593        if (!page)
 594                return;
 595        detach_page_private(page);
 596        page->mapping = NULL;
 597        unlock_page(page);
 598        mempool_free(page, compress_page_pool);
 599}
 600
 601#define MAX_VMAP_RETRIES        3
 602
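/*
 * Map @count pages into one contiguous virtual buffer, retrying a few times
 * after flushing lazily-freed vmap aliases when vm_map_ram() fails.
 */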
 603static void *f2fs_vmap(struct page **pages, unsigned int count)
 604{
 605        int i;
 606        void *buf = NULL;
 607
 608        for (i = 0; i < MAX_VMAP_RETRIES; i++) {
 609                buf = vm_map_ram(pages, count, -1);
 610                if (buf)
 611                        break;
 612                vm_unmap_aliases();
 613        }
 614        return buf;
 615}
 616
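/*
 * Compress one cluster: allocate cpages, map rpages/cpages into linear
 * buffers, run the per-algorithm compressor, then trim the unused tail
 * pages.  Fails with -EAGAIN when the compressed result does not save
 * space, in which case the caller writes the cluster as raw pages.
 */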
 617static int f2fs_compress_pages(struct compress_ctx *cc)
 618{
 619        struct f2fs_inode_info *fi = F2FS_I(cc->inode);
 620        const struct f2fs_compress_ops *cops =
 621                                f2fs_cops[fi->i_compress_algorithm];
 622        unsigned int max_len, new_nr_cpages;
 623        struct page **new_cpages;
 624        u32 chksum = 0;
 625        int i, ret;
 626
 627        trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
 628                                cc->cluster_size, fi->i_compress_algorithm);
 629
 630        if (cops->init_compress_ctx) {
 631                ret = cops->init_compress_ctx(cc);
 632                if (ret)
 633                        goto out;
 634        }
 635
 636        max_len = COMPRESS_HEADER_SIZE + cc->clen;
 637        cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
 638
 639        cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
 640        if (!cc->cpages) {
 641                ret = -ENOMEM;
 642                goto destroy_compress_ctx;
 643        }
 644
 645        for (i = 0; i < cc->nr_cpages; i++) {
 646                cc->cpages[i] = f2fs_compress_alloc_page();
 647                if (!cc->cpages[i]) {
 648                        ret = -ENOMEM;
 649                        goto out_free_cpages;
 650                }
 651        }
 652
 653        cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
 654        if (!cc->rbuf) {
 655                ret = -ENOMEM;
 656                goto out_free_cpages;
 657        }
 658
 659        cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
 660        if (!cc->cbuf) {
 661                ret = -ENOMEM;
 662                goto out_vunmap_rbuf;
 663        }
 664
 665        ret = cops->compress_pages(cc);
 666        if (ret)
 667                goto out_vunmap_cbuf;
 668
 669        max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
 670
 671        if (cc->clen > max_len) {
 672                ret = -EAGAIN;
 673                goto out_vunmap_cbuf;
 674        }
 675
 676        cc->cbuf->clen = cpu_to_le32(cc->clen);
 677
 678        if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
 679                chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
 680                                        cc->cbuf->cdata, cc->clen);
 681        cc->cbuf->chksum = cpu_to_le32(chksum);
 682
 683        for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
 684                cc->cbuf->reserved[i] = cpu_to_le32(0);
 685
 686        new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
 687
 688        /* Now we're going to cut unnecessary tail pages */
 689        new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
 690        if (!new_cpages) {
 691                ret = -ENOMEM;
 692                goto out_vunmap_cbuf;
 693        }
 694
 695        /* zero out any unused part of the last page */
 696        memset(&cc->cbuf->cdata[cc->clen], 0,
 697                        (new_nr_cpages * PAGE_SIZE) -
 698                        (cc->clen + COMPRESS_HEADER_SIZE));
 699
 700        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 701        vm_unmap_ram(cc->rbuf, cc->cluster_size);
 702
 703        for (i = 0; i < cc->nr_cpages; i++) {
 704                if (i < new_nr_cpages) {
 705                        new_cpages[i] = cc->cpages[i];
 706                        continue;
 707                }
 708                f2fs_compress_free_page(cc->cpages[i]);
 709                cc->cpages[i] = NULL;
 710        }
 711
 712        if (cops->destroy_compress_ctx)
 713                cops->destroy_compress_ctx(cc);
 714
 715        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
 716        cc->cpages = new_cpages;
 717        cc->nr_cpages = new_nr_cpages;
 718
 719        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 720                                                        cc->clen, ret);
 721        return 0;
 722
 723out_vunmap_cbuf:
 724        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 725out_vunmap_rbuf:
 726        vm_unmap_ram(cc->rbuf, cc->cluster_size);
 727out_free_cpages:
 728        for (i = 0; i < cc->nr_cpages; i++) {
 729                if (cc->cpages[i])
 730                        f2fs_compress_free_page(cc->cpages[i]);
 731        }
 732        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
 733        cc->cpages = NULL;
 734destroy_compress_ctx:
 735        if (cops->destroy_compress_ctx)
 736                cops->destroy_compress_ctx(cc);
 737out:
 738        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 739                                                        cc->clen, ret);
 740        return ret;
 741}
 742
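/*
 * Decompress a cluster once all of its compressed pages have been read:
 * fill missing rpages slots with temporary pages, map the buffers, run the
 * decompressor and verify the optional checksum before ending the read.
 */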
 743void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
 744{
 745        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 746        struct f2fs_inode_info *fi = F2FS_I(dic->inode);
 747        const struct f2fs_compress_ops *cops =
 748                        f2fs_cops[fi->i_compress_algorithm];
 749        int ret;
 750        int i;
 751
 752        trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
 753                                dic->cluster_size, fi->i_compress_algorithm);
 754
 755        if (dic->failed) {
 756                ret = -EIO;
 757                goto out_end_io;
 758        }
 759
 760        dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
 761        if (!dic->tpages) {
 762                ret = -ENOMEM;
 763                goto out_end_io;
 764        }
 765
 766        for (i = 0; i < dic->cluster_size; i++) {
 767                if (dic->rpages[i]) {
 768                        dic->tpages[i] = dic->rpages[i];
 769                        continue;
 770                }
 771
 772                dic->tpages[i] = f2fs_compress_alloc_page();
 773                if (!dic->tpages[i]) {
 774                        ret = -ENOMEM;
 775                        goto out_end_io;
 776                }
 777        }
 778
 779        if (cops->init_decompress_ctx) {
 780                ret = cops->init_decompress_ctx(dic);
 781                if (ret)
 782                        goto out_end_io;
 783        }
 784
 785        dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
 786        if (!dic->rbuf) {
 787                ret = -ENOMEM;
 788                goto out_destroy_decompress_ctx;
 789        }
 790
 791        dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
 792        if (!dic->cbuf) {
 793                ret = -ENOMEM;
 794                goto out_vunmap_rbuf;
 795        }
 796
 797        dic->clen = le32_to_cpu(dic->cbuf->clen);
 798        dic->rlen = PAGE_SIZE << dic->log_cluster_size;
 799
 800        if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
 801                ret = -EFSCORRUPTED;
 802                goto out_vunmap_cbuf;
 803        }
 804
 805        ret = cops->decompress_pages(dic);
 806
 807        if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
 808                u32 provided = le32_to_cpu(dic->cbuf->chksum);
 809                u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
 810
 811                if (provided != calculated) {
 812                        if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
 813                                set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
 814                                printk_ratelimited(
 815                                        "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
 816                                        KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
 817                                        provided, calculated);
 818                        }
 819                        set_sbi_flag(sbi, SBI_NEED_FSCK);
 820                }
 821        }
 822
 823out_vunmap_cbuf:
 824        vm_unmap_ram(dic->cbuf, dic->nr_cpages);
 825out_vunmap_rbuf:
 826        vm_unmap_ram(dic->rbuf, dic->cluster_size);
 827out_destroy_decompress_ctx:
 828        if (cops->destroy_decompress_ctx)
 829                cops->destroy_decompress_ctx(dic);
 830out_end_io:
 831        trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
 832                                                        dic->clen, ret);
 833        f2fs_decompress_end_io(dic, ret);
 834}
 835
 836/*
 837 * This is called when a page of a compressed cluster has been read from disk
 838 * (or failed to be read from disk).  It checks whether this page was the last
 839 * page being waited on in the cluster, and if so, it decompresses the cluster
 840 * (or in the case of a failure, cleans up without actually decompressing).
 841 */
 842void f2fs_end_read_compressed_page(struct page *page, bool failed,
 843                                                block_t blkaddr)
 844{
 845        struct decompress_io_ctx *dic =
 846                        (struct decompress_io_ctx *)page_private(page);
 847        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 848
 849        dec_page_count(sbi, F2FS_RD_DATA);
 850
 851        if (failed)
 852                WRITE_ONCE(dic->failed, true);
 853        else if (blkaddr)
 854                f2fs_cache_compressed_page(sbi, page,
 855                                        dic->inode->i_ino, blkaddr);
 856
 857        if (atomic_dec_and_test(&dic->remaining_pages))
 858                f2fs_decompress_cluster(dic);
 859}
 860
 861static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
 862{
 863        if (cc->cluster_idx == NULL_CLUSTER)
 864                return true;
 865        return cc->cluster_idx == cluster_idx(cc, index);
 866}
 867
 868bool f2fs_cluster_is_empty(struct compress_ctx *cc)
 869{
 870        return cc->nr_rpages == 0;
 871}
 872
 873static bool f2fs_cluster_is_full(struct compress_ctx *cc)
 874{
 875        return cc->cluster_size == cc->nr_rpages;
 876}
 877
 878bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
 879{
 880        if (f2fs_cluster_is_empty(cc))
 881                return true;
 882        return is_page_in_cluster(cc, index);
 883}
 884
 885bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
 886                                int index, int nr_pages)
 887{
 888        unsigned long pgidx;
 889        int i;
 890
 891        if (nr_pages - index < cc->cluster_size)
 892                return false;
 893
 894        pgidx = pvec->pages[index]->index;
 895
 896        for (i = 1; i < cc->cluster_size; i++) {
 897                if (pvec->pages[index + i]->index != pgidx + i)
 898                        return false;
 899        }
 900
 901        return true;
 902}
 903
 904static bool cluster_has_invalid_data(struct compress_ctx *cc)
 905{
 906        loff_t i_size = i_size_read(cc->inode);
 907        unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
 908        int i;
 909
 910        for (i = 0; i < cc->cluster_size; i++) {
 911                struct page *page = cc->rpages[i];
 912
 913                f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
 914
 915                /* beyond EOF */
 916                if (page->index >= nr_pages)
 917                        return true;
 918        }
 919        return false;
 920}
 921
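/*
 * Validate the on-disk block address layout of a compressed cluster; on an
 * inconsistent layout, warn and mark the filesystem for fsck.
 */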
 922bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
 923{
 924        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 925        unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
 926        bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
 927        int cluster_end = 0;
 928        int i;
 929        char *reason = "";
 930
 931        if (!compressed)
 932                return false;
 933
 934        /* [..., COMPR_ADDR, ...] */
 935        if (dn->ofs_in_node % cluster_size) {
 936                reason = "[*|C|*|*]";
 937                goto out;
 938        }
 939
 940        for (i = 1; i < cluster_size; i++) {
 941                block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
 942                                                        dn->ofs_in_node + i);
 943
 944                /* [COMPR_ADDR, ..., COMPR_ADDR] */
 945                if (blkaddr == COMPRESS_ADDR) {
 946                        reason = "[C|*|C|*]";
 947                        goto out;
 948                }
 949                if (compressed) {
 950                        if (!__is_valid_data_blkaddr(blkaddr)) {
 951                                if (!cluster_end)
 952                                        cluster_end = i;
 953                                continue;
 954                        }
 955                        /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
 956                        if (cluster_end) {
 957                                reason = "[C|N|N|V]";
 958                                goto out;
 959                        }
 960                }
 961        }
 962        return false;
 963out:
 964        f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
 965                        dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
 966        set_sbi_flag(sbi, SBI_NEED_FSCK);
 967        return true;
 968}
 969
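/*
 * Return the number of blocks in a compressed cluster, including the
 * COMPRESS_ADDR header slot: valid data blocks only when @compr is true,
 * otherwise any block that is not NULL_ADDR.  Returns 0 if the cluster is
 * not compressed.
 */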
 970static int __f2fs_cluster_blocks(struct inode *inode,
 971                                unsigned int cluster_idx, bool compr)
 972{
 973        struct dnode_of_data dn;
 974        unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
 975        unsigned int start_idx = cluster_idx <<
 976                                F2FS_I(inode)->i_log_cluster_size;
 977        int ret;
 978
 979        set_new_dnode(&dn, inode, NULL, NULL, 0);
 980        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
 981        if (ret) {
 982                if (ret == -ENOENT)
 983                        ret = 0;
 984                goto fail;
 985        }
 986
 987        if (f2fs_sanity_check_cluster(&dn)) {
 988                ret = -EFSCORRUPTED;
 989                goto fail;
 990        }
 991
 992        if (dn.data_blkaddr == COMPRESS_ADDR) {
 993                int i;
 994
 995                ret = 1;
 996                for (i = 1; i < cluster_size; i++) {
 997                        block_t blkaddr;
 998
 999                        blkaddr = data_blkaddr(dn.inode,
1000                                        dn.node_page, dn.ofs_in_node + i);
1001                        if (compr) {
1002                                if (__is_valid_data_blkaddr(blkaddr))
1003                                        ret++;
1004                        } else {
1005                                if (blkaddr != NULL_ADDR)
1006                                        ret++;
1007                        }
1008                }
1009
1010                f2fs_bug_on(F2FS_I_SB(inode),
1011                        !compr && ret != cluster_size &&
1012                        !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
1013        }
1014fail:
1015        f2fs_put_dnode(&dn);
1016        return ret;
1017}
1018
1019/* return # of compressed blocks in compressed cluster */
1020static int f2fs_compressed_blocks(struct compress_ctx *cc)
1021{
1022        return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
1023}
1024
1025/* return # of valid blocks in compressed cluster */
1026int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
1027{
1028        return __f2fs_cluster_blocks(inode,
1029                index >> F2FS_I(inode)->i_log_cluster_size,
1030                false);
1031}
1032
1033static bool cluster_may_compress(struct compress_ctx *cc)
1034{
1035        if (!f2fs_need_compress_data(cc->inode))
1036                return false;
1037        if (f2fs_is_atomic_file(cc->inode))
1038                return false;
1039        if (!f2fs_cluster_is_full(cc))
1040                return false;
1041        if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1042                return false;
1043        return !cluster_has_invalid_data(cc);
1044}
1045
1046static void set_cluster_writeback(struct compress_ctx *cc)
1047{
1048        int i;
1049
1050        for (i = 0; i < cc->cluster_size; i++) {
1051                if (cc->rpages[i])
1052                        set_page_writeback(cc->rpages[i]);
1053        }
1054}
1055
1056static void set_cluster_dirty(struct compress_ctx *cc)
1057{
1058        int i;
1059
1060        for (i = 0; i < cc->cluster_size; i++)
1061                if (cc->rpages[i])
1062                        set_page_dirty(cc->rpages[i]);
1063}
1064
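/*
 * For a write into an existing compressed cluster, read and lock every page
 * of the cluster up front so the whole cluster can be rewritten coherently.
 */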
1065static int prepare_compress_overwrite(struct compress_ctx *cc,
1066                struct page **pagep, pgoff_t index, void **fsdata)
1067{
1068        struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1069        struct address_space *mapping = cc->inode->i_mapping;
1070        struct page *page;
1071        sector_t last_block_in_bio;
1072        unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1073        pgoff_t start_idx = start_idx_of_cluster(cc);
1074        int i, ret;
1075
1076retry:
1077        ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1078        if (ret <= 0)
1079                return ret;
1080
1081        ret = f2fs_init_compress_ctx(cc);
1082        if (ret)
1083                return ret;
1084
1085        /* keep page reference to avoid page reclaim */
1086        for (i = 0; i < cc->cluster_size; i++) {
1087                page = f2fs_pagecache_get_page(mapping, start_idx + i,
1088                                                        fgp_flag, GFP_NOFS);
1089                if (!page) {
1090                        ret = -ENOMEM;
1091                        goto unlock_pages;
1092                }
1093
1094                if (PageUptodate(page))
1095                        f2fs_put_page(page, 1);
1096                else
1097                        f2fs_compress_ctx_add_page(cc, page);
1098        }
1099
1100        if (!f2fs_cluster_is_empty(cc)) {
1101                struct bio *bio = NULL;
1102
1103                ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1104                                        &last_block_in_bio, false, true);
1105                f2fs_put_rpages(cc);
1106                f2fs_destroy_compress_ctx(cc, true);
1107                if (ret)
1108                        goto out;
1109                if (bio)
1110                        f2fs_submit_bio(sbi, bio, DATA);
1111
1112                ret = f2fs_init_compress_ctx(cc);
1113                if (ret)
1114                        goto out;
1115        }
1116
1117        for (i = 0; i < cc->cluster_size; i++) {
1118                f2fs_bug_on(sbi, cc->rpages[i]);
1119
1120                page = find_lock_page(mapping, start_idx + i);
1121                if (!page) {
1122                        /* page can be truncated */
1123                        goto release_and_retry;
1124                }
1125
1126                f2fs_wait_on_page_writeback(page, DATA, true, true);
1127                f2fs_compress_ctx_add_page(cc, page);
1128
1129                if (!PageUptodate(page)) {
1130release_and_retry:
1131                        f2fs_put_rpages(cc);
1132                        f2fs_unlock_rpages(cc, i + 1);
1133                        f2fs_destroy_compress_ctx(cc, true);
1134                        goto retry;
1135                }
1136        }
1137
1138        if (likely(!ret)) {
1139                *fsdata = cc->rpages;
1140                *pagep = cc->rpages[offset_in_cluster(cc, index)];
1141                return cc->cluster_size;
1142        }
1143
1144unlock_pages:
1145        f2fs_put_rpages(cc);
1146        f2fs_unlock_rpages(cc, i);
1147        f2fs_destroy_compress_ctx(cc, true);
1148out:
1149        return ret;
1150}
1151
1152int f2fs_prepare_compress_overwrite(struct inode *inode,
1153                struct page **pagep, pgoff_t index, void **fsdata)
1154{
1155        struct compress_ctx cc = {
1156                .inode = inode,
1157                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1158                .cluster_size = F2FS_I(inode)->i_cluster_size,
1159                .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1160                .rpages = NULL,
1161                .nr_rpages = 0,
1162        };
1163
1164        return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1165}
1166
1167bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1168                                        pgoff_t index, unsigned copied)
1169
1170{
1171        struct compress_ctx cc = {
1172                .inode = inode,
1173                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1174                .cluster_size = F2FS_I(inode)->i_cluster_size,
1175                .rpages = fsdata,
1176        };
1177        bool first_index = (index == cc.rpages[0]->index);
1178
1179        if (copied)
1180                set_cluster_dirty(&cc);
1181
1182        f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1183        f2fs_destroy_compress_ctx(&cc, false);
1184
1185        return first_index;
1186}
1187
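/*
 * Truncate within a cluster: for a compressed cluster, load and lock all of
 * its pages, zero everything past @from and write the cluster back dirty.
 */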
1188int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1189{
1190        void *fsdata = NULL;
1191        struct page *pagep;
1192        int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1193        pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1194                                                        log_cluster_size;
1195        int err;
1196
1197        err = f2fs_is_compressed_cluster(inode, start_idx);
1198        if (err < 0)
1199                return err;
1200
1201        /* truncate normal cluster */
1202        if (!err)
1203                return f2fs_do_truncate_blocks(inode, from, lock);
1204
1205        /* truncate compressed cluster */
1206        err = f2fs_prepare_compress_overwrite(inode, &pagep,
1207                                                start_idx, &fsdata);
1208
1209        /* should not be a normal cluster */
1210        f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1211
1212        if (err <= 0)
1213                return err;
1214
1215        if (err > 0) {
1216                struct page **rpages = fsdata;
1217                int cluster_size = F2FS_I(inode)->i_cluster_size;
1218                int i;
1219
1220                for (i = cluster_size - 1; i >= 0; i--) {
1221                        loff_t start = rpages[i]->index << PAGE_SHIFT;
1222
1223                        if (from <= start) {
1224                                zero_user_segment(rpages[i], 0, PAGE_SIZE);
1225                        } else {
1226                                zero_user_segment(rpages[i], from - start,
1227                                                                PAGE_SIZE);
1228                                break;
1229                        }
1230                }
1231
1232                f2fs_compress_write_end(inode, fsdata, start_idx, true);
1233        }
1234        return 0;
1235}
1236
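/*
 * Write one compressed cluster: mark the cluster header with COMPRESS_ADDR,
 * write the compressed pages out-of-place and invalidate block addresses
 * that are no longer needed.  Returns -EAGAIN so the caller can fall back
 * to writing raw pages.
 */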
1237static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1238                                        int *submitted,
1239                                        struct writeback_control *wbc,
1240                                        enum iostat_type io_type)
1241{
1242        struct inode *inode = cc->inode;
1243        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1244        struct f2fs_inode_info *fi = F2FS_I(inode);
1245        struct f2fs_io_info fio = {
1246                .sbi = sbi,
1247                .ino = cc->inode->i_ino,
1248                .type = DATA,
1249                .op = REQ_OP_WRITE,
1250                .op_flags = wbc_to_write_flags(wbc),
1251                .old_blkaddr = NEW_ADDR,
1252                .page = NULL,
1253                .encrypted_page = NULL,
1254                .compressed_page = NULL,
1255                .submitted = false,
1256                .io_type = io_type,
1257                .io_wbc = wbc,
1258                .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1259        };
1260        struct dnode_of_data dn;
1261        struct node_info ni;
1262        struct compress_io_ctx *cic;
1263        pgoff_t start_idx = start_idx_of_cluster(cc);
1264        unsigned int last_index = cc->cluster_size - 1;
1265        loff_t psize;
1266        int i, err;
1267
 1268        /* we should bypass data pages to let the kworker jobs proceed */
1269        if (unlikely(f2fs_cp_error(sbi))) {
1270                mapping_set_error(cc->rpages[0]->mapping, -EIO);
1271                goto out_free;
1272        }
1273
1274        if (IS_NOQUOTA(inode)) {
1275                /*
1276                 * We need to wait for node_write to avoid block allocation during
1277                 * checkpoint. This can only happen to quota writes which can cause
1278                 * the below discard race condition.
1279                 */
1280                down_read(&sbi->node_write);
1281        } else if (!f2fs_trylock_op(sbi)) {
1282                goto out_free;
1283        }
1284
1285        set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1286
1287        err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1288        if (err)
1289                goto out_unlock_op;
1290
1291        for (i = 0; i < cc->cluster_size; i++) {
1292                if (data_blkaddr(dn.inode, dn.node_page,
1293                                        dn.ofs_in_node + i) == NULL_ADDR)
1294                        goto out_put_dnode;
1295        }
1296
1297        psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1298
1299        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1300        if (err)
1301                goto out_put_dnode;
1302
1303        fio.version = ni.version;
1304
1305        cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1306        if (!cic)
1307                goto out_put_dnode;
1308
1309        cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1310        cic->inode = inode;
1311        atomic_set(&cic->pending_pages, cc->nr_cpages);
1312        cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1313        if (!cic->rpages)
1314                goto out_put_cic;
1315
1316        cic->nr_rpages = cc->cluster_size;
1317
1318        for (i = 0; i < cc->nr_cpages; i++) {
1319                f2fs_set_compressed_page(cc->cpages[i], inode,
1320                                        cc->rpages[i + 1]->index, cic);
1321                fio.compressed_page = cc->cpages[i];
1322
1323                fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1324                                                dn.ofs_in_node + i + 1);
1325
1326                /* wait for GCed page writeback via META_MAPPING */
1327                f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1328
1329                if (fio.encrypted) {
1330                        fio.page = cc->rpages[i + 1];
1331                        err = f2fs_encrypt_one_page(&fio);
1332                        if (err)
1333                                goto out_destroy_crypt;
1334                        cc->cpages[i] = fio.encrypted_page;
1335                }
1336        }
1337
1338        set_cluster_writeback(cc);
1339
1340        for (i = 0; i < cc->cluster_size; i++)
1341                cic->rpages[i] = cc->rpages[i];
1342
1343        for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1344                block_t blkaddr;
1345
1346                blkaddr = f2fs_data_blkaddr(&dn);
1347                fio.page = cc->rpages[i];
1348                fio.old_blkaddr = blkaddr;
1349
1350                /* cluster header */
1351                if (i == 0) {
1352                        if (blkaddr == COMPRESS_ADDR)
1353                                fio.compr_blocks++;
1354                        if (__is_valid_data_blkaddr(blkaddr))
1355                                f2fs_invalidate_blocks(sbi, blkaddr);
1356                        f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1357                        goto unlock_continue;
1358                }
1359
1360                if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1361                        fio.compr_blocks++;
1362
1363                if (i > cc->nr_cpages) {
1364                        if (__is_valid_data_blkaddr(blkaddr)) {
1365                                f2fs_invalidate_blocks(sbi, blkaddr);
1366                                f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1367                        }
1368                        goto unlock_continue;
1369                }
1370
1371                f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1372
1373                if (fio.encrypted)
1374                        fio.encrypted_page = cc->cpages[i - 1];
1375                else
1376                        fio.compressed_page = cc->cpages[i - 1];
1377
1378                cc->cpages[i - 1] = NULL;
1379                f2fs_outplace_write_data(&dn, &fio);
1380                (*submitted)++;
1381unlock_continue:
1382                inode_dec_dirty_pages(cc->inode);
1383                unlock_page(fio.page);
1384        }
1385
1386        if (fio.compr_blocks)
1387                f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1388        f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
1389        add_compr_block_stat(inode, cc->nr_cpages);
1390
1391        set_inode_flag(cc->inode, FI_APPEND_WRITE);
1392        if (cc->cluster_idx == 0)
1393                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1394
1395        f2fs_put_dnode(&dn);
1396        if (IS_NOQUOTA(inode))
1397                up_read(&sbi->node_write);
1398        else
1399                f2fs_unlock_op(sbi);
1400
1401        spin_lock(&fi->i_size_lock);
1402        if (fi->last_disk_size < psize)
1403                fi->last_disk_size = psize;
1404        spin_unlock(&fi->i_size_lock);
1405
1406        f2fs_put_rpages(cc);
1407        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1408        cc->cpages = NULL;
1409        f2fs_destroy_compress_ctx(cc, false);
1410        return 0;
1411
1412out_destroy_crypt:
1413        page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1414
1415        for (--i; i >= 0; i--)
1416                fscrypt_finalize_bounce_page(&cc->cpages[i]);
1417out_put_cic:
1418        kmem_cache_free(cic_entry_slab, cic);
1419out_put_dnode:
1420        f2fs_put_dnode(&dn);
1421out_unlock_op:
1422        if (IS_NOQUOTA(inode))
1423                up_read(&sbi->node_write);
1424        else
1425                f2fs_unlock_op(sbi);
1426out_free:
1427        for (i = 0; i < cc->nr_cpages; i++) {
1428                if (!cc->cpages[i])
1429                        continue;
1430                f2fs_compress_free_page(cc->cpages[i]);
1431                cc->cpages[i] = NULL;
1432        }
1433        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1434        cc->cpages = NULL;
1435        return -EAGAIN;
1436}
1437
1438void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1439{
1440        struct f2fs_sb_info *sbi = bio->bi_private;
1441        struct compress_io_ctx *cic =
1442                        (struct compress_io_ctx *)page_private(page);
1443        int i;
1444
1445        if (unlikely(bio->bi_status))
1446                mapping_set_error(cic->inode->i_mapping, -EIO);
1447
1448        f2fs_compress_free_page(page);
1449
1450        dec_page_count(sbi, F2FS_WB_DATA);
1451
1452        if (atomic_dec_return(&cic->pending_pages))
1453                return;
1454
1455        for (i = 0; i < cic->nr_rpages; i++) {
1456                WARN_ON(!cic->rpages[i]);
1457                clear_page_private_gcing(cic->rpages[i]);
1458                end_page_writeback(cic->rpages[i]);
1459        }
1460
1461        page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1462        kmem_cache_free(cic_entry_slab, cic);
1463}
1464
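/*
 * Fallback path: write the cluster's dirty pages one by one without
 * compression, redirtying the remaining pages if a write fails.
 */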
1465static int f2fs_write_raw_pages(struct compress_ctx *cc,
1466                                        int *submitted,
1467                                        struct writeback_control *wbc,
1468                                        enum iostat_type io_type)
1469{
1470        struct address_space *mapping = cc->inode->i_mapping;
1471        int _submitted, compr_blocks, ret;
1472        int i = -1, err = 0;
1473
1474        compr_blocks = f2fs_compressed_blocks(cc);
1475        if (compr_blocks < 0) {
1476                err = compr_blocks;
1477                goto out_err;
1478        }
1479
1480        for (i = 0; i < cc->cluster_size; i++) {
1481                if (!cc->rpages[i])
1482                        continue;
1483retry_write:
1484                if (cc->rpages[i]->mapping != mapping) {
1485                        unlock_page(cc->rpages[i]);
1486                        continue;
1487                }
1488
1489                BUG_ON(!PageLocked(cc->rpages[i]));
1490
1491                ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1492                                                NULL, NULL, wbc, io_type,
1493                                                compr_blocks, false);
1494                if (ret) {
1495                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
1496                                unlock_page(cc->rpages[i]);
1497                                ret = 0;
1498                        } else if (ret == -EAGAIN) {
1499                                /*
1500                                 * for quota file, just redirty left pages to
1501                                 * avoid deadlock caused by cluster update race
1502                                 * from foreground operation.
1503                                 */
1504                                if (IS_NOQUOTA(cc->inode)) {
1505                                        err = 0;
1506                                        goto out_err;
1507                                }
1508                                ret = 0;
1509                                cond_resched();
1510                                congestion_wait(BLK_RW_ASYNC,
1511                                                DEFAULT_IO_TIMEOUT);
1512                                lock_page(cc->rpages[i]);
1513
1514                                if (!PageDirty(cc->rpages[i])) {
1515                                        unlock_page(cc->rpages[i]);
1516                                        continue;
1517                                }
1518
1519                                clear_page_dirty_for_io(cc->rpages[i]);
1520                                goto retry_write;
1521                        }
1522                        err = ret;
1523                        goto out_err;
1524                }
1525
1526                *submitted += _submitted;
1527        }
1528
1529        f2fs_balance_fs(F2FS_M_SB(mapping), true);
1530
1531        return 0;
1532out_err:
1533        for (++i; i < cc->cluster_size; i++) {
1534                if (!cc->rpages[i])
1535                        continue;
1536                redirty_page_for_writepage(wbc, cc->rpages[i]);
1537                unlock_page(cc->rpages[i]);
1538        }
1539        return err;
1540}
1541
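/*
 * Writeback entry point for a cluster: try to compress and write it, and
 * fall back to writing raw pages when compression is not possible or does
 * not help.
 */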
1542int f2fs_write_multi_pages(struct compress_ctx *cc,
1543                                        int *submitted,
1544                                        struct writeback_control *wbc,
1545                                        enum iostat_type io_type)
1546{
1547        int err;
1548
1549        *submitted = 0;
1550        if (cluster_may_compress(cc)) {
1551                err = f2fs_compress_pages(cc);
1552                if (err == -EAGAIN) {
1553                        add_compr_block_stat(cc->inode, cc->cluster_size);
1554                        goto write;
1555                } else if (err) {
1556                        f2fs_put_rpages_wbc(cc, wbc, true, 1);
1557                        goto destroy_out;
1558                }
1559
1560                err = f2fs_write_compressed_pages(cc, submitted,
1561                                                        wbc, io_type);
1562                if (!err)
1563                        return 0;
1564                f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1565        }
1566write:
1567        f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1568
1569        err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1570        f2fs_put_rpages_wbc(cc, wbc, false, 0);
1571destroy_out:
1572        f2fs_destroy_compress_ctx(cc, false);
1573        return err;
1574}
1575
1576static void f2fs_free_dic(struct decompress_io_ctx *dic);
1577
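/*
 * Allocate a decompress_io_ctx for one cluster.  The rpage pointers are
 * copied from the compress context, and nr_cpages fresh pages are allocated
 * to receive the compressed data read from disk.
 */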
1578struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1579{
1580        struct decompress_io_ctx *dic;
1581        pgoff_t start_idx = start_idx_of_cluster(cc);
1582        int i;
1583
1584        dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
1585                                        false, F2FS_I_SB(cc->inode));
1586        if (!dic)
1587                return ERR_PTR(-ENOMEM);
1588
1589        dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1590        if (!dic->rpages) {
1591                kmem_cache_free(dic_entry_slab, dic);
1592                return ERR_PTR(-ENOMEM);
1593        }
1594
1595        dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1596        dic->inode = cc->inode;
1597        atomic_set(&dic->remaining_pages, cc->nr_cpages);
1598        dic->cluster_idx = cc->cluster_idx;
1599        dic->cluster_size = cc->cluster_size;
1600        dic->log_cluster_size = cc->log_cluster_size;
1601        dic->nr_cpages = cc->nr_cpages;
1602        refcount_set(&dic->refcnt, 1);
1603        dic->failed = false;
1604        dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1605
1606        for (i = 0; i < dic->cluster_size; i++)
1607                dic->rpages[i] = cc->rpages[i];
1608        dic->nr_rpages = cc->cluster_size;
1609
1610        dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1611        if (!dic->cpages)
1612                goto out_free;
1613
1614        for (i = 0; i < dic->nr_cpages; i++) {
1615                struct page *page;
1616
1617                page = f2fs_compress_alloc_page();
1618                if (!page)
1619                        goto out_free;
1620
1621                f2fs_set_compressed_page(page, cc->inode,
1622                                        start_idx + i + 1, dic);
1623                dic->cpages[i] = page;
1624        }
1625
1626        return dic;
1627
1628out_free:
1629        f2fs_free_dic(dic);
1630        return ERR_PTR(-ENOMEM);
1631}
1632
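/*
 * Free a decompress_io_ctx: temporary pages (tpages slots that are not
 * backed by a pagecache rpage), the cpages that held the compressed data,
 * and the page pointer arrays.  The rpages themselves stay in the page
 * cache and are not freed here.
 */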
1633static void f2fs_free_dic(struct decompress_io_ctx *dic)
1634{
1635        int i;
1636
1637        if (dic->tpages) {
1638                for (i = 0; i < dic->cluster_size; i++) {
1639                        if (dic->rpages[i])
1640                                continue;
1641                        if (!dic->tpages[i])
1642                                continue;
1643                        f2fs_compress_free_page(dic->tpages[i]);
1644                }
1645                page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1646        }
1647
1648        if (dic->cpages) {
1649                for (i = 0; i < dic->nr_cpages; i++) {
1650                        if (!dic->cpages[i])
1651                                continue;
1652                        f2fs_compress_free_page(dic->cpages[i]);
1653                }
1654                page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1655        }
1656
1657        page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1658        kmem_cache_free(dic_entry_slab, dic);
1659}
1660
1661static void f2fs_put_dic(struct decompress_io_ctx *dic)
1662{
1663        if (refcount_dec_and_test(&dic->refcnt))
1664                f2fs_free_dic(dic);
1665}
1666
1667/*
1668 * Update and unlock the cluster's pagecache pages, and release the reference to
1669 * the decompress_io_ctx that was being held for I/O completion.
1670 */
1671static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1672{
1673        int i;
1674
1675        for (i = 0; i < dic->cluster_size; i++) {
1676                struct page *rpage = dic->rpages[i];
1677
1678                if (!rpage)
1679                        continue;
1680
1681                /* PG_error was set if verity failed. */
1682                if (failed || PageError(rpage)) {
1683                        ClearPageUptodate(rpage);
1684                        /* will re-read again later */
1685                        ClearPageError(rpage);
1686                } else {
1687                        SetPageUptodate(rpage);
1688                }
1689                unlock_page(rpage);
1690        }
1691
1692        f2fs_put_dic(dic);
1693}
1694
1695static void f2fs_verify_cluster(struct work_struct *work)
1696{
1697        struct decompress_io_ctx *dic =
1698                container_of(work, struct decompress_io_ctx, verity_work);
1699        int i;
1700
1701        /* Verify the cluster's decompressed pages with fs-verity. */
1702        for (i = 0; i < dic->cluster_size; i++) {
1703                struct page *rpage = dic->rpages[i];
1704
1705                if (rpage && !fsverity_verify_page(rpage))
1706                        SetPageError(rpage);
1707        }
1708
1709        __f2fs_decompress_end_io(dic, false);
1710}
1711
1712/*
1713 * This is called when a compressed cluster has been decompressed
1714 * (or failed to be read and/or decompressed).
1715 */
1716void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1717{
1718        if (!failed && dic->need_verity) {
1719                /*
1720                 * Note that to avoid deadlocks, the verity work can't be done
1721                 * on the decompression workqueue.  This is because verifying
1722                 * the data pages can involve reading metadata pages from the
1723                 * file, and these metadata pages may be compressed.
1724                 */
1725                INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1726                fsverity_enqueue_verify_work(&dic->verity_work);
1727        } else {
1728                __f2fs_decompress_end_io(dic, failed);
1729        }
1730}
1731
1732/*
1733 * Put a reference to a compressed page's decompress_io_ctx.
1734 *
1735 * This is called when the page is no longer needed and can be freed.
1736 */
1737void f2fs_put_page_dic(struct page *page)
1738{
1739        struct decompress_io_ctx *dic =
1740                        (struct decompress_io_ctx *)page_private(page);
1741
1742        f2fs_put_dic(dic);
1743}
1744
1745/*
1746 * Check whether the cluster's blocks are contiguous; the caller adds an
1747 * extent cache entry only if they are logically and physically contiguous.
1748 */
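/*
 * Illustrative sketch (an assumption for the example: cluster_size = 4 with
 * two compressed blocks) of a compressed cluster's dnode slots:
 *
 *	[COMPRESS_ADDR][blkaddr B][blkaddr B+1][NEW_ADDR]
 *
 * The walk below starts right after the first data slot and stops at the
 * first slot that is not a valid data blkaddr.  The return value is the
 * number of valid, physically contiguous data blocks, or 0 on a
 * discontinuity.
 */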
1749unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
1750{
1751        bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
1752        int i = compressed ? 1 : 0;
1753        block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1754                                                dn->ofs_in_node + i);
1755
1756        for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1757                block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1758                                                dn->ofs_in_node + i);
1759
1760                if (!__is_valid_data_blkaddr(blkaddr))
1761                        break;
1762                if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1763                        return 0;
1764        }
1765
1766        return compressed ? i - 1 : i;
1767}
1768
1769const struct address_space_operations f2fs_compress_aops = {
1770        .releasepage = f2fs_release_page,
1771        .invalidatepage = f2fs_invalidate_page,
1772};
1773
1774struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1775{
1776        return sbi->compress_inode->i_mapping;
1777}
1778
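/*
 * Drop a single block from the compress cache, typically when the on-disk
 * block at @blkaddr is invalidated and may be reused for other data.
 */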
1779void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1780{
1781        if (!sbi->compress_inode)
1782                return;
1783        invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1784}
1785
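/*
 * Cache one compressed block in the compress inode's page cache, indexed by
 * block address and tagged with the owning inode number, so a later read of
 * the same block can be served from memory.  This is best effort: low
 * memory, an invalid blkaddr or an already-cached page simply skips the
 * copy.
 */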
1786void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1787                                                nid_t ino, block_t blkaddr)
1788{
1789        struct page *cpage;
1790        int ret;
1791
1792        if (!test_opt(sbi, COMPRESS_CACHE))
1793                return;
1794
1795        if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1796                return;
1797
1798        if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1799                return;
1800
1801        cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1802        if (cpage) {
1803                f2fs_put_page(cpage, 0);
1804                return;
1805        }
1806
1807        cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1808        if (!cpage)
1809                return;
1810
1811        ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1812                                                blkaddr, GFP_NOFS);
1813        if (ret) {
1814                f2fs_put_page(cpage, 0);
1815                return;
1816        }
1817
1818        set_page_private_data(cpage, ino);
1819
1820        if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1821                goto out;
1822
1823        memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1824        SetPageUptodate(cpage);
1825out:
1826        f2fs_put_page(cpage, 1);
1827}
1828
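/*
 * Look up @blkaddr in the compress cache; on a hit, copy the cached
 * compressed data into @page and return true so the caller can skip the
 * read I/O for that block.  A minimal usage sketch (the label below is
 * illustrative, not from this file):
 *
 *	if (f2fs_load_compressed_page(sbi, page, blkaddr))
 *		goto skip_read_io;
 *
 * FGP_LOCK | FGP_NOWAIT keeps the lookup from blocking on a page that is
 * currently locked by another task.
 */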
1829bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1830                                                                block_t blkaddr)
1831{
1832        struct page *cpage;
1833        bool hit = false;
1834
1835        if (!test_opt(sbi, COMPRESS_CACHE))
1836                return false;
1837
1838        cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1839                                blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1840        if (cpage) {
1841                if (PageUptodate(cpage)) {
1842                        atomic_inc(&sbi->compress_page_hit);
1843                        memcpy(page_address(page),
1844                                page_address(cpage), PAGE_SIZE);
1845                        hit = true;
1846                }
1847                f2fs_put_page(cpage, 1);
1848        }
1849
1850        return hit;
1851}
1852
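/*
 * Walk the compress inode's page cache and drop every cached block that is
 * tagged with inode number @ino, e.g. when that inode's data blocks are
 * released.
 */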
1853void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1854{
1855        struct address_space *mapping = sbi->compress_inode->i_mapping;
1856        struct pagevec pvec;
1857        pgoff_t index = 0;
1858        pgoff_t end = MAX_BLKADDR(sbi);
1859
1860        if (!mapping->nrpages)
1861                return;
1862
1863        pagevec_init(&pvec);
1864
1865        do {
1866                unsigned int nr_pages;
1867                int i;
1868
1869                nr_pages = pagevec_lookup_range(&pvec, mapping,
1870                                                &index, end - 1);
1871                if (!nr_pages)
1872                        break;
1873
1874                for (i = 0; i < nr_pages; i++) {
1875                        struct page *page = pvec.pages[i];
1876
1877                        if (page->index > end)
1878                                break;
1879
1880                        lock_page(page);
1881                        if (page->mapping != mapping) {
1882                                unlock_page(page);
1883                                continue;
1884                        }
1885
1886                        if (ino != get_page_private_data(page)) {
1887                                unlock_page(page);
1888                                continue;
1889                        }
1890
1891                        generic_error_remove_page(mapping, page);
1892                        unlock_page(page);
1893                }
1894                pagevec_release(&pvec);
1895                cond_resched();
1896        } while (index < end);
1897}
1898
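/*
 * When the COMPRESS_CACHE mount option is enabled, grab the reserved
 * compress inode and initialize the cache watermark, percentage and hit
 * counter.
 */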
1899int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
1900{
1901        struct inode *inode;
1902
1903        if (!test_opt(sbi, COMPRESS_CACHE))
1904                return 0;
1905
1906        inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
1907        if (IS_ERR(inode))
1908                return PTR_ERR(inode);
1909        sbi->compress_inode = inode;
1910
1911        sbi->compress_percent = COMPRESS_PERCENT;
1912        sbi->compress_watermark = COMPRESS_WATERMARK;
1913
1914        atomic_set(&sbi->compress_page_hit, 0);
1915
1916        return 0;
1917}
1918
1919void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
1920{
1921        if (!sbi->compress_inode)
1922                return;
1923        iput(sbi->compress_inode);
1924        sbi->compress_inode = NULL;
1925}
1926
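/*
 * Create a per-device slab for the page pointer arrays used by the
 * compress/decompress contexts.  The object size covers one cluster's worth
 * of page pointers; larger requests fall back to kzalloc() in
 * page_array_alloc().
 */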
1927int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1928{
1929        dev_t dev = sbi->sb->s_bdev->bd_dev;
1930        char slab_name[32];
1931
1932        snprintf(slab_name, sizeof(slab_name), "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1933
1934        sbi->page_array_slab_size = sizeof(struct page *) <<
1935                                        F2FS_OPTION(sbi).compress_log_size;
1936
1937        sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1938                                        sbi->page_array_slab_size);
1939        if (!sbi->page_array_slab)
1940                return -ENOMEM;
1941        return 0;
1942}
1943
1944void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1945{
1946        kmem_cache_destroy(sbi->page_array_slab);
1947}
1948
1949static int __init f2fs_init_cic_cache(void)
1950{
1951        cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1952                                        sizeof(struct compress_io_ctx));
1953        if (!cic_entry_slab)
1954                return -ENOMEM;
1955        return 0;
1956}
1957
1958static void f2fs_destroy_cic_cache(void)
1959{
1960        kmem_cache_destroy(cic_entry_slab);
1961}
1962
1963static int __init f2fs_init_dic_cache(void)
1964{
1965        dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1966                                        sizeof(struct decompress_io_ctx));
1967        if (!dic_entry_slab)
1968                return -ENOMEM;
1969        return 0;
1970}
1971
1972static void f2fs_destroy_dic_cache(void)
1973{
1974        kmem_cache_destroy(dic_entry_slab);
1975}
1976
1977int __init f2fs_init_compress_cache(void)
1978{
1979        int err;
1980
1981        err = f2fs_init_cic_cache();
1982        if (err)
1983                goto out;
1984        err = f2fs_init_dic_cache();
1985        if (err)
1986                goto free_cic;
1987        return 0;
1988free_cic:
1989        f2fs_destroy_cic_cache();
1990out:
1991        return err;
1992}
1993
1994void f2fs_destroy_compress_cache(void)
1995{
1996        f2fs_destroy_dic_cache();
1997        f2fs_destroy_cic_cache();
1998}
1999