linux/fs/f2fs/compress.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * f2fs compress support
   4 *
   5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
   6 */
   7
   8#include <linux/fs.h>
   9#include <linux/f2fs_fs.h>
  10#include <linux/writeback.h>
  11#include <linux/backing-dev.h>
  12#include <linux/lzo.h>
  13#include <linux/lz4.h>
  14#include <linux/zstd.h>
  15
  16#include "f2fs.h"
  17#include "node.h"
  18#include <trace/events/f2fs.h>
  19
  20static struct kmem_cache *cic_entry_slab;
  21static struct kmem_cache *dic_entry_slab;
  22
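     /*
      * Allocate the page pointer array for one cluster.  Requests that fit in
      * the per-superblock page_array_slab are served from that slab; anything
      * larger falls back to f2fs_kzalloc() (and is freed with kfree() in
      * page_array_free() below).
      */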
  23static void *page_array_alloc(struct inode *inode, int nr)
  24{
  25        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  26        unsigned int size = sizeof(struct page *) * nr;
  27
  28        if (likely(size <= sbi->page_array_slab_size))
  29                return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
  30        return f2fs_kzalloc(sbi, size, GFP_NOFS);
  31}
  32
  33static void page_array_free(struct inode *inode, void *pages, int nr)
  34{
  35        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  36        unsigned int size = sizeof(struct page *) * nr;
  37
  38        if (!pages)
  39                return;
  40
  41        if (likely(size <= sbi->page_array_slab_size))
  42                kmem_cache_free(sbi->page_array_slab, pages);
  43        else
  44                kfree(pages);
  45}
  46
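     /*
      * Per-algorithm compression hooks.  init_decompress_ctx and
      * destroy_decompress_ctx are optional; callers check for NULL before
      * invoking them (of the backends below, only zstd provides them).
      */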
  47struct f2fs_compress_ops {
  48        int (*init_compress_ctx)(struct compress_ctx *cc);
  49        void (*destroy_compress_ctx)(struct compress_ctx *cc);
  50        int (*compress_pages)(struct compress_ctx *cc);
  51        int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
  52        void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
  53        int (*decompress_pages)(struct decompress_io_ctx *dic);
  54};
  55
  56static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
  57{
  58        return index & (cc->cluster_size - 1);
  59}
  60
  61static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
  62{
  63        return index >> cc->log_cluster_size;
  64}
  65
  66static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
  67{
  68        return cc->cluster_idx << cc->log_cluster_size;
  69}
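
     /*
      * Example: with cluster_size = 4 (log_cluster_size = 2), page index 11
      * lies in cluster_idx 11 >> 2 = 2, at offset_in_cluster 11 & 3 = 3, and
      * start_idx_of_cluster() for that cluster yields page index 2 << 2 = 8.
      * cluster_size is always a power of two (1 << log_cluster_size), so the
      * mask/shift pair above is equivalent to mod/div.
      */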
  70
  71bool f2fs_is_compressed_page(struct page *page)
  72{
  73        if (!PagePrivate(page))
  74                return false;
  75        if (!page_private(page))
  76                return false;
  77        if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
  78                return false;
  79        /*
   80         * page->private may hold a pid set by IO tracing;
   81         * a value below pid_max indicates a traced page, not a compressed one.
  82         */
  83        if (IS_IO_TRACED_PAGE(page))
  84                return false;
  85
  86        f2fs_bug_on(F2FS_M_SB(page->mapping),
  87                *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
  88        return true;
  89}
  90
  91static void f2fs_set_compressed_page(struct page *page,
  92                struct inode *inode, pgoff_t index, void *data)
  93{
  94        SetPagePrivate(page);
  95        set_page_private(page, (unsigned long)data);
  96
  97        /* i_crypto_info and iv index */
  98        page->index = index;
  99        page->mapping = inode->i_mapping;
 100}
 101
 102static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
 103{
 104        int i;
 105
 106        for (i = 0; i < len; i++) {
 107                if (!cc->rpages[i])
 108                        continue;
 109                if (unlock)
 110                        unlock_page(cc->rpages[i]);
 111                else
 112                        put_page(cc->rpages[i]);
 113        }
 114}
 115
 116static void f2fs_put_rpages(struct compress_ctx *cc)
 117{
 118        f2fs_drop_rpages(cc, cc->cluster_size, false);
 119}
 120
 121static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
 122{
 123        f2fs_drop_rpages(cc, len, true);
 124}
 125
 126static void f2fs_put_rpages_mapping(struct address_space *mapping,
 127                                pgoff_t start, int len)
 128{
 129        int i;
 130
 131        for (i = 0; i < len; i++) {
 132                struct page *page = find_get_page(mapping, start + i);
 133
 134                put_page(page);
 135                put_page(page);
 136        }
 137}
 138
 139static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
 140                struct writeback_control *wbc, bool redirty, int unlock)
 141{
 142        unsigned int i;
 143
 144        for (i = 0; i < cc->cluster_size; i++) {
 145                if (!cc->rpages[i])
 146                        continue;
 147                if (redirty)
 148                        redirty_page_for_writepage(wbc, cc->rpages[i]);
 149                f2fs_put_page(cc->rpages[i], unlock);
 150        }
 151}
 152
 153struct page *f2fs_compress_control_page(struct page *page)
 154{
 155        return ((struct compress_io_ctx *)page_private(page))->rpages[0];
 156}
 157
 158int f2fs_init_compress_ctx(struct compress_ctx *cc)
 159{
 160        if (cc->rpages)
 161                return 0;
 162
 163        cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
 164        return cc->rpages ? 0 : -ENOMEM;
 165}
 166
 167void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
 168{
 169        page_array_free(cc->inode, cc->rpages, cc->cluster_size);
 170        cc->rpages = NULL;
 171        cc->nr_rpages = 0;
 172        cc->nr_cpages = 0;
 173        cc->cluster_idx = NULL_CLUSTER;
 174}
 175
 176void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
 177{
 178        unsigned int cluster_ofs;
 179
 180        if (!f2fs_cluster_can_merge_page(cc, page->index))
 181                f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
 182
 183        cluster_ofs = offset_in_cluster(cc, page->index);
 184        cc->rpages[cluster_ofs] = page;
 185        cc->nr_rpages++;
 186        cc->cluster_idx = cluster_idx(cc, page->index);
 187}
 188
 189#ifdef CONFIG_F2FS_FS_LZO
 190static int lzo_init_compress_ctx(struct compress_ctx *cc)
 191{
 192        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
 193                                LZO1X_MEM_COMPRESS, GFP_NOFS);
 194        if (!cc->private)
 195                return -ENOMEM;
 196
 197        cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
 198        return 0;
 199}
 200
 201static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
 202{
 203        kvfree(cc->private);
 204        cc->private = NULL;
 205}
 206
 207static int lzo_compress_pages(struct compress_ctx *cc)
 208{
 209        int ret;
 210
 211        ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 212                                        &cc->clen, cc->private);
 213        if (ret != LZO_E_OK) {
 214                printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
 215                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
 216                return -EIO;
 217        }
 218        return 0;
 219}
 220
 221static int lzo_decompress_pages(struct decompress_io_ctx *dic)
 222{
 223        int ret;
 224
 225        ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
 226                                                dic->rbuf, &dic->rlen);
 227        if (ret != LZO_E_OK) {
 228                printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
 229                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
 230                return -EIO;
 231        }
 232
 233        if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
 234                printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
 235                                        "expected:%lu\n", KERN_ERR,
 236                                        F2FS_I_SB(dic->inode)->sb->s_id,
 237                                        dic->rlen,
 238                                        PAGE_SIZE << dic->log_cluster_size);
 239                return -EIO;
 240        }
 241        return 0;
 242}
 243
 244static const struct f2fs_compress_ops f2fs_lzo_ops = {
 245        .init_compress_ctx      = lzo_init_compress_ctx,
 246        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
 247        .compress_pages         = lzo_compress_pages,
 248        .decompress_pages       = lzo_decompress_pages,
 249};
 250#endif
 251
 252#ifdef CONFIG_F2FS_FS_LZ4
 253static int lz4_init_compress_ctx(struct compress_ctx *cc)
 254{
 255        unsigned int size = LZ4_MEM_COMPRESS;
 256
 257#ifdef CONFIG_F2FS_FS_LZ4HC
 258        if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
 259                size = LZ4HC_MEM_COMPRESS;
 260#endif
 261
 262        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
 263        if (!cc->private)
 264                return -ENOMEM;
 265
 266        /*
  267         * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
  268         * the worst-case compression, because the lz4 compressor handles
  269         * the output budget properly on its own.
 270         */
 271        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 272        return 0;
 273}
 274
 275static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 276{
 277        kvfree(cc->private);
 278        cc->private = NULL;
 279}
 280
 281#ifdef CONFIG_F2FS_FS_LZ4HC
 282static int lz4hc_compress_pages(struct compress_ctx *cc)
 283{
 284        unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
 285                                                COMPRESS_LEVEL_OFFSET;
 286        int len;
 287
 288        if (level)
 289                len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 290                                        cc->clen, level, cc->private);
 291        else
 292                len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 293                                                cc->clen, cc->private);
 294        if (!len)
 295                return -EAGAIN;
 296
 297        cc->clen = len;
 298        return 0;
 299}
 300#endif
 301
 302static int lz4_compress_pages(struct compress_ctx *cc)
 303{
 304        int len;
 305
 306#ifdef CONFIG_F2FS_FS_LZ4HC
 307        return lz4hc_compress_pages(cc);
 308#endif
 309        len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 310                                                cc->clen, cc->private);
 311        if (!len)
 312                return -EAGAIN;
 313
 314        cc->clen = len;
 315        return 0;
 316}
 317
 318static int lz4_decompress_pages(struct decompress_io_ctx *dic)
 319{
 320        int ret;
 321
 322        ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
 323                                                dic->clen, dic->rlen);
 324        if (ret < 0) {
 325                printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
 326                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
 327                return -EIO;
 328        }
 329
 330        if (ret != PAGE_SIZE << dic->log_cluster_size) {
 331                printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
 332                                        "expected:%lu\n", KERN_ERR,
 333                                        F2FS_I_SB(dic->inode)->sb->s_id,
 334                                        dic->rlen,
 335                                        PAGE_SIZE << dic->log_cluster_size);
 336                return -EIO;
 337        }
 338        return 0;
 339}
 340
 341static const struct f2fs_compress_ops f2fs_lz4_ops = {
 342        .init_compress_ctx      = lz4_init_compress_ctx,
 343        .destroy_compress_ctx   = lz4_destroy_compress_ctx,
 344        .compress_pages         = lz4_compress_pages,
 345        .decompress_pages       = lz4_decompress_pages,
 346};
 347#endif
 348
 349#ifdef CONFIG_F2FS_FS_ZSTD
 350#define F2FS_ZSTD_DEFAULT_CLEVEL        1
 351
 352static int zstd_init_compress_ctx(struct compress_ctx *cc)
 353{
 354        ZSTD_parameters params;
 355        ZSTD_CStream *stream;
 356        void *workspace;
 357        unsigned int workspace_size;
 358        unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
 359                                                COMPRESS_LEVEL_OFFSET;
 360
 361        if (!level)
 362                level = F2FS_ZSTD_DEFAULT_CLEVEL;
 363
 364        params = ZSTD_getParams(level, cc->rlen, 0);
 365        workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
 366
 367        workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
 368                                        workspace_size, GFP_NOFS);
 369        if (!workspace)
 370                return -ENOMEM;
 371
 372        stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
 373        if (!stream) {
 374                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
 375                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 376                                __func__);
 377                kvfree(workspace);
 378                return -EIO;
 379        }
 380
 381        cc->private = workspace;
 382        cc->private2 = stream;
 383
 384        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 385        return 0;
 386}
 387
 388static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
 389{
 390        kvfree(cc->private);
 391        cc->private = NULL;
 392        cc->private2 = NULL;
 393}
 394
 395static int zstd_compress_pages(struct compress_ctx *cc)
 396{
 397        ZSTD_CStream *stream = cc->private2;
 398        ZSTD_inBuffer inbuf;
 399        ZSTD_outBuffer outbuf;
 400        int src_size = cc->rlen;
 401        int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 402        int ret;
 403
 404        inbuf.pos = 0;
 405        inbuf.src = cc->rbuf;
 406        inbuf.size = src_size;
 407
 408        outbuf.pos = 0;
 409        outbuf.dst = cc->cbuf->cdata;
 410        outbuf.size = dst_size;
 411
 412        ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
 413        if (ZSTD_isError(ret)) {
 414                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
 415                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 416                                __func__, ZSTD_getErrorCode(ret));
 417                return -EIO;
 418        }
 419
 420        ret = ZSTD_endStream(stream, &outbuf);
 421        if (ZSTD_isError(ret)) {
 422                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
 423                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 424                                __func__, ZSTD_getErrorCode(ret));
 425                return -EIO;
 426        }
 427
 428        /*
  429         * compressed data remains in the intermediate buffer because
  430         * there is no more space left in cbuf.cdata
 431         */
 432        if (ret)
 433                return -EAGAIN;
 434
 435        cc->clen = outbuf.pos;
 436        return 0;
 437}
 438
 439static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 440{
 441        ZSTD_DStream *stream;
 442        void *workspace;
 443        unsigned int workspace_size;
 444        unsigned int max_window_size =
 445                        MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
 446
 447        workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
 448
 449        workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
 450                                        workspace_size, GFP_NOFS);
 451        if (!workspace)
 452                return -ENOMEM;
 453
 454        stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
 455        if (!stream) {
 456                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
 457                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
 458                                __func__);
 459                kvfree(workspace);
 460                return -EIO;
 461        }
 462
 463        dic->private = workspace;
 464        dic->private2 = stream;
 465
 466        return 0;
 467}
 468
 469static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
 470{
 471        kvfree(dic->private);
 472        dic->private = NULL;
 473        dic->private2 = NULL;
 474}
 475
 476static int zstd_decompress_pages(struct decompress_io_ctx *dic)
 477{
 478        ZSTD_DStream *stream = dic->private2;
 479        ZSTD_inBuffer inbuf;
 480        ZSTD_outBuffer outbuf;
 481        int ret;
 482
 483        inbuf.pos = 0;
 484        inbuf.src = dic->cbuf->cdata;
 485        inbuf.size = dic->clen;
 486
 487        outbuf.pos = 0;
 488        outbuf.dst = dic->rbuf;
 489        outbuf.size = dic->rlen;
 490
 491        ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
 492        if (ZSTD_isError(ret)) {
  493                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
 494                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
 495                                __func__, ZSTD_getErrorCode(ret));
 496                return -EIO;
 497        }
 498
 499        if (dic->rlen != outbuf.pos) {
 500                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
 501                                "expected:%lu\n", KERN_ERR,
 502                                F2FS_I_SB(dic->inode)->sb->s_id,
 503                                __func__, dic->rlen,
 504                                PAGE_SIZE << dic->log_cluster_size);
 505                return -EIO;
 506        }
 507
 508        return 0;
 509}
 510
 511static const struct f2fs_compress_ops f2fs_zstd_ops = {
 512        .init_compress_ctx      = zstd_init_compress_ctx,
 513        .destroy_compress_ctx   = zstd_destroy_compress_ctx,
 514        .compress_pages         = zstd_compress_pages,
 515        .init_decompress_ctx    = zstd_init_decompress_ctx,
 516        .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
 517        .decompress_pages       = zstd_decompress_pages,
 518};
 519#endif
 520
 521#ifdef CONFIG_F2FS_FS_LZO
 522#ifdef CONFIG_F2FS_FS_LZORLE
 523static int lzorle_compress_pages(struct compress_ctx *cc)
 524{
 525        int ret;
 526
 527        ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 528                                        &cc->clen, cc->private);
 529        if (ret != LZO_E_OK) {
 530                printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
 531                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
 532                return -EIO;
 533        }
 534        return 0;
 535}
 536
 537static const struct f2fs_compress_ops f2fs_lzorle_ops = {
 538        .init_compress_ctx      = lzo_init_compress_ctx,
 539        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
 540        .compress_pages         = lzorle_compress_pages,
 541        .decompress_pages       = lzo_decompress_pages,
 542};
 543#endif
 544#endif
 545
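     /*
      * Backend table indexed by F2FS_I(inode)->i_compress_algorithm
      * (COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE).  Entries
      * for algorithms that are compiled out stay NULL, which is what
      * f2fs_is_compress_backend_ready() checks for.
      */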
 546static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
 547#ifdef CONFIG_F2FS_FS_LZO
 548        &f2fs_lzo_ops,
 549#else
 550        NULL,
 551#endif
 552#ifdef CONFIG_F2FS_FS_LZ4
 553        &f2fs_lz4_ops,
 554#else
 555        NULL,
 556#endif
 557#ifdef CONFIG_F2FS_FS_ZSTD
 558        &f2fs_zstd_ops,
 559#else
 560        NULL,
 561#endif
 562#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
 563        &f2fs_lzorle_ops,
 564#else
 565        NULL,
 566#endif
 567};
 568
 569bool f2fs_is_compress_backend_ready(struct inode *inode)
 570{
 571        if (!f2fs_compressed_file(inode))
 572                return true;
 573        return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
 574}
 575
 576static mempool_t *compress_page_pool;
  577static unsigned int num_compress_pages = 512;
 578module_param(num_compress_pages, uint, 0444);
 579MODULE_PARM_DESC(num_compress_pages,
 580                "Number of intermediate compress pages to preallocate");
 581
 582int f2fs_init_compress_mempool(void)
 583{
 584        compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
 585        if (!compress_page_pool)
 586                return -ENOMEM;
 587
 588        return 0;
 589}
 590
 591void f2fs_destroy_compress_mempool(void)
 592{
 593        mempool_destroy(compress_page_pool);
 594}
 595
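     /*
      * Intermediate (compressed-data) pages come from compress_page_pool.
      * mempool_alloc() with GFP_NOFS blocks until a page is available rather
      * than failing, and the page is returned locked.
      */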
 596static struct page *f2fs_compress_alloc_page(void)
 597{
 598        struct page *page;
 599
 600        page = mempool_alloc(compress_page_pool, GFP_NOFS);
 601        lock_page(page);
 602
 603        return page;
 604}
 605
 606static void f2fs_compress_free_page(struct page *page)
 607{
 608        if (!page)
 609                return;
 610        set_page_private(page, (unsigned long)NULL);
 611        ClearPagePrivate(page);
 612        page->mapping = NULL;
 613        unlock_page(page);
 614        mempool_free(page, compress_page_pool);
 615}
 616
 617#define MAX_VMAP_RETRIES        3
 618
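     /*
      * Map a page array into one contiguous virtual buffer.  vm_map_ram() can
      * fail transiently when the vmap area is fragmented, so retry up to
      * MAX_VMAP_RETRIES times, flushing lazily-freed aliases in between.
      */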
 619static void *f2fs_vmap(struct page **pages, unsigned int count)
 620{
 621        int i;
 622        void *buf = NULL;
 623
 624        for (i = 0; i < MAX_VMAP_RETRIES; i++) {
 625                buf = vm_map_ram(pages, count, -1);
 626                if (buf)
 627                        break;
 628                vm_unmap_aliases();
 629        }
 630        return buf;
 631}
 632
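     /*
      * Compress one cluster: map rpages (raw) and cpages (compressed) with
      * f2fs_vmap(), run the per-algorithm compress_pages() hook, then trim the
      * now-unneeded tail cpages.  Returns 0 on success; -EAGAIN means the data
      * did not shrink enough and the caller falls back to writing raw pages.
      */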
 633static int f2fs_compress_pages(struct compress_ctx *cc)
 634{
 635        struct f2fs_inode_info *fi = F2FS_I(cc->inode);
 636        const struct f2fs_compress_ops *cops =
 637                                f2fs_cops[fi->i_compress_algorithm];
 638        unsigned int max_len, new_nr_cpages;
 639        struct page **new_cpages;
 640        u32 chksum = 0;
 641        int i, ret;
 642
 643        trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
 644                                cc->cluster_size, fi->i_compress_algorithm);
 645
 646        if (cops->init_compress_ctx) {
 647                ret = cops->init_compress_ctx(cc);
 648                if (ret)
 649                        goto out;
 650        }
 651
 652        max_len = COMPRESS_HEADER_SIZE + cc->clen;
 653        cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
 654
 655        cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
 656        if (!cc->cpages) {
 657                ret = -ENOMEM;
 658                goto destroy_compress_ctx;
 659        }
 660
 661        for (i = 0; i < cc->nr_cpages; i++) {
 662                cc->cpages[i] = f2fs_compress_alloc_page();
 663                if (!cc->cpages[i]) {
 664                        ret = -ENOMEM;
 665                        goto out_free_cpages;
 666                }
 667        }
 668
 669        cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
 670        if (!cc->rbuf) {
 671                ret = -ENOMEM;
 672                goto out_free_cpages;
 673        }
 674
 675        cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
 676        if (!cc->cbuf) {
 677                ret = -ENOMEM;
 678                goto out_vunmap_rbuf;
 679        }
 680
 681        ret = cops->compress_pages(cc);
 682        if (ret)
 683                goto out_vunmap_cbuf;
 684
 685        max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
 686
 687        if (cc->clen > max_len) {
 688                ret = -EAGAIN;
 689                goto out_vunmap_cbuf;
 690        }
 691
 692        cc->cbuf->clen = cpu_to_le32(cc->clen);
 693
 694        if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
 695                chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
 696                                        cc->cbuf->cdata, cc->clen);
 697        cc->cbuf->chksum = cpu_to_le32(chksum);
 698
 699        for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
 700                cc->cbuf->reserved[i] = cpu_to_le32(0);
 701
 702        new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
 703
 704        /* Now we're going to cut unnecessary tail pages */
 705        new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
 706        if (!new_cpages) {
 707                ret = -ENOMEM;
 708                goto out_vunmap_cbuf;
 709        }
 710
 711        /* zero out any unused part of the last page */
 712        memset(&cc->cbuf->cdata[cc->clen], 0,
 713                        (new_nr_cpages * PAGE_SIZE) -
 714                        (cc->clen + COMPRESS_HEADER_SIZE));
 715
 716        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 717        vm_unmap_ram(cc->rbuf, cc->cluster_size);
 718
 719        for (i = 0; i < cc->nr_cpages; i++) {
 720                if (i < new_nr_cpages) {
 721                        new_cpages[i] = cc->cpages[i];
 722                        continue;
 723                }
 724                f2fs_compress_free_page(cc->cpages[i]);
 725                cc->cpages[i] = NULL;
 726        }
 727
 728        if (cops->destroy_compress_ctx)
 729                cops->destroy_compress_ctx(cc);
 730
 731        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
 732        cc->cpages = new_cpages;
 733        cc->nr_cpages = new_nr_cpages;
 734
 735        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 736                                                        cc->clen, ret);
 737        return 0;
 738
 739out_vunmap_cbuf:
 740        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 741out_vunmap_rbuf:
 742        vm_unmap_ram(cc->rbuf, cc->cluster_size);
 743out_free_cpages:
 744        for (i = 0; i < cc->nr_cpages; i++) {
 745                if (cc->cpages[i])
 746                        f2fs_compress_free_page(cc->cpages[i]);
 747        }
 748        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
 749        cc->cpages = NULL;
 750destroy_compress_ctx:
 751        if (cops->destroy_compress_ctx)
 752                cops->destroy_compress_ctx(cc);
 753out:
 754        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 755                                                        cc->clen, ret);
 756        return ret;
 757}
 758
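     /*
      * Decompress one cluster in memory.  tpages[] supplies scratch pages for
      * any slot that has no target pagecache page, the per-algorithm
      * decompress_pages() hook fills the raw buffer, and an optional checksum
      * is verified before completion is signalled via f2fs_decompress_end_io().
      */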
 759static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
 760{
 761        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 762        struct f2fs_inode_info *fi = F2FS_I(dic->inode);
 763        const struct f2fs_compress_ops *cops =
 764                        f2fs_cops[fi->i_compress_algorithm];
 765        int ret;
 766        int i;
 767
 768        trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
 769                                dic->cluster_size, fi->i_compress_algorithm);
 770
 771        if (dic->failed) {
 772                ret = -EIO;
 773                goto out_end_io;
 774        }
 775
 776        dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
 777        if (!dic->tpages) {
 778                ret = -ENOMEM;
 779                goto out_end_io;
 780        }
 781
 782        for (i = 0; i < dic->cluster_size; i++) {
 783                if (dic->rpages[i]) {
 784                        dic->tpages[i] = dic->rpages[i];
 785                        continue;
 786                }
 787
 788                dic->tpages[i] = f2fs_compress_alloc_page();
 789                if (!dic->tpages[i]) {
 790                        ret = -ENOMEM;
 791                        goto out_end_io;
 792                }
 793        }
 794
 795        if (cops->init_decompress_ctx) {
 796                ret = cops->init_decompress_ctx(dic);
 797                if (ret)
 798                        goto out_end_io;
 799        }
 800
 801        dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
 802        if (!dic->rbuf) {
 803                ret = -ENOMEM;
 804                goto out_destroy_decompress_ctx;
 805        }
 806
 807        dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
 808        if (!dic->cbuf) {
 809                ret = -ENOMEM;
 810                goto out_vunmap_rbuf;
 811        }
 812
 813        dic->clen = le32_to_cpu(dic->cbuf->clen);
 814        dic->rlen = PAGE_SIZE << dic->log_cluster_size;
 815
 816        if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
 817                ret = -EFSCORRUPTED;
 818                goto out_vunmap_cbuf;
 819        }
 820
 821        ret = cops->decompress_pages(dic);
 822
 823        if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
 824                u32 provided = le32_to_cpu(dic->cbuf->chksum);
 825                u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
 826
 827                if (provided != calculated) {
 828                        if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
 829                                set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
 830                                printk_ratelimited(
 831                                        "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
 832                                        KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
 833                                        provided, calculated);
 834                        }
 835                        set_sbi_flag(sbi, SBI_NEED_FSCK);
 836                }
 837        }
 838
 839out_vunmap_cbuf:
 840        vm_unmap_ram(dic->cbuf, dic->nr_cpages);
 841out_vunmap_rbuf:
 842        vm_unmap_ram(dic->rbuf, dic->cluster_size);
 843out_destroy_decompress_ctx:
 844        if (cops->destroy_decompress_ctx)
 845                cops->destroy_decompress_ctx(dic);
 846out_end_io:
 847        trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
 848                                                        dic->clen, ret);
 849        f2fs_decompress_end_io(dic, ret);
 850}
 851
 852/*
 853 * This is called when a page of a compressed cluster has been read from disk
 854 * (or failed to be read from disk).  It checks whether this page was the last
 855 * page being waited on in the cluster, and if so, it decompresses the cluster
 856 * (or in the case of a failure, cleans up without actually decompressing).
 857 */
 858void f2fs_end_read_compressed_page(struct page *page, bool failed)
 859{
 860        struct decompress_io_ctx *dic =
 861                        (struct decompress_io_ctx *)page_private(page);
 862        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 863
 864        dec_page_count(sbi, F2FS_RD_DATA);
 865
 866        if (failed)
 867                WRITE_ONCE(dic->failed, true);
 868
 869        if (atomic_dec_and_test(&dic->remaining_pages))
 870                f2fs_decompress_cluster(dic);
 871}
 872
 873static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
 874{
 875        if (cc->cluster_idx == NULL_CLUSTER)
 876                return true;
 877        return cc->cluster_idx == cluster_idx(cc, index);
 878}
 879
 880bool f2fs_cluster_is_empty(struct compress_ctx *cc)
 881{
 882        return cc->nr_rpages == 0;
 883}
 884
 885static bool f2fs_cluster_is_full(struct compress_ctx *cc)
 886{
 887        return cc->cluster_size == cc->nr_rpages;
 888}
 889
 890bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
 891{
 892        if (f2fs_cluster_is_empty(cc))
 893                return true;
 894        return is_page_in_cluster(cc, index);
 895}
 896
 897static bool __cluster_may_compress(struct compress_ctx *cc)
 898{
 899        struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
 900        loff_t i_size = i_size_read(cc->inode);
 901        unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
 902        int i;
 903
 904        for (i = 0; i < cc->cluster_size; i++) {
 905                struct page *page = cc->rpages[i];
 906
 907                f2fs_bug_on(sbi, !page);
 908
 909                if (unlikely(f2fs_cp_error(sbi)))
 910                        return false;
 911                if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 912                        return false;
 913
 914                /* beyond EOF */
 915                if (page->index >= nr_pages)
 916                        return false;
 917        }
 918        return true;
 919}
 920
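     /*
      * Returns a negative errno on failure, 0 if the cluster is not stored in
      * compressed form, and otherwise 1 + the number of (valid or non-NULL,
      * depending on @compr) data blocks following the COMPRESS_ADDR header.
      */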
 921static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
 922{
 923        struct dnode_of_data dn;
 924        int ret;
 925
 926        set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
 927        ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
 928                                                        LOOKUP_NODE);
 929        if (ret) {
 930                if (ret == -ENOENT)
 931                        ret = 0;
 932                goto fail;
 933        }
 934
 935        if (dn.data_blkaddr == COMPRESS_ADDR) {
 936                int i;
 937
 938                ret = 1;
 939                for (i = 1; i < cc->cluster_size; i++) {
 940                        block_t blkaddr;
 941
 942                        blkaddr = data_blkaddr(dn.inode,
 943                                        dn.node_page, dn.ofs_in_node + i);
 944                        if (compr) {
 945                                if (__is_valid_data_blkaddr(blkaddr))
 946                                        ret++;
 947                        } else {
 948                                if (blkaddr != NULL_ADDR)
 949                                        ret++;
 950                        }
 951                }
 952        }
 953fail:
 954        f2fs_put_dnode(&dn);
 955        return ret;
 956}
 957
 958/* return # of compressed blocks in compressed cluster */
 959static int f2fs_compressed_blocks(struct compress_ctx *cc)
 960{
 961        return __f2fs_cluster_blocks(cc, true);
 962}
 963
 964/* return # of valid blocks in compressed cluster */
 965static int f2fs_cluster_blocks(struct compress_ctx *cc)
 966{
 967        return __f2fs_cluster_blocks(cc, false);
 968}
 969
 970int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
 971{
 972        struct compress_ctx cc = {
 973                .inode = inode,
 974                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
 975                .cluster_size = F2FS_I(inode)->i_cluster_size,
 976                .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
 977        };
 978
 979        return f2fs_cluster_blocks(&cc);
 980}
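     /*
      * Illustrative use (hypothetical caller): a return value > 0 means @index
      * falls inside a cluster that is stored compressed on disk, e.g.
      *
      *     if (f2fs_is_compressed_cluster(inode, index) > 0)
      *             handle_compressed_cluster(inode, index);
      *
      * where handle_compressed_cluster() is a made-up helper for the example.
      */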
 981
 982static bool cluster_may_compress(struct compress_ctx *cc)
 983{
 984        if (!f2fs_need_compress_data(cc->inode))
 985                return false;
 986        if (f2fs_is_atomic_file(cc->inode))
 987                return false;
 988        if (f2fs_is_mmap_file(cc->inode))
 989                return false;
 990        if (!f2fs_cluster_is_full(cc))
 991                return false;
 992        if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
 993                return false;
 994        return __cluster_may_compress(cc);
 995}
 996
 997static void set_cluster_writeback(struct compress_ctx *cc)
 998{
 999        int i;
1000
1001        for (i = 0; i < cc->cluster_size; i++) {
1002                if (cc->rpages[i])
1003                        set_page_writeback(cc->rpages[i]);
1004        }
1005}
1006
1007static void set_cluster_dirty(struct compress_ctx *cc)
1008{
1009        int i;
1010
1011        for (i = 0; i < cc->cluster_size; i++)
1012                if (cc->rpages[i])
1013                        set_page_dirty(cc->rpages[i]);
1014}
1015
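     /*
      * write_begin() helper for a cluster that already has on-disk data: pin
      * every page of the cluster in the pagecache, read (and decompress) it if
      * needed, and hand the locked, up-to-date pages back through *pagep and
      * *fsdata.  Returns cluster_size on success, 0 if the cluster has no
      * on-disk blocks, or a negative errno.
      */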
1016static int prepare_compress_overwrite(struct compress_ctx *cc,
1017                struct page **pagep, pgoff_t index, void **fsdata)
1018{
1019        struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1020        struct address_space *mapping = cc->inode->i_mapping;
1021        struct page *page;
1022        struct dnode_of_data dn;
1023        sector_t last_block_in_bio;
1024        unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1025        pgoff_t start_idx = start_idx_of_cluster(cc);
1026        int i, ret;
1027        bool prealloc;
1028
1029retry:
1030        ret = f2fs_cluster_blocks(cc);
1031        if (ret <= 0)
1032                return ret;
1033
1034        /* compressed case */
1035        prealloc = (ret < cc->cluster_size);
1036
1037        ret = f2fs_init_compress_ctx(cc);
1038        if (ret)
1039                return ret;
1040
1041        /* keep page reference to avoid page reclaim */
1042        for (i = 0; i < cc->cluster_size; i++) {
1043                page = f2fs_pagecache_get_page(mapping, start_idx + i,
1044                                                        fgp_flag, GFP_NOFS);
1045                if (!page) {
1046                        ret = -ENOMEM;
1047                        goto unlock_pages;
1048                }
1049
1050                if (PageUptodate(page))
1051                        unlock_page(page);
1052                else
1053                        f2fs_compress_ctx_add_page(cc, page);
1054        }
1055
1056        if (!f2fs_cluster_is_empty(cc)) {
1057                struct bio *bio = NULL;
1058
1059                ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1060                                        &last_block_in_bio, false, true);
1061                f2fs_destroy_compress_ctx(cc);
1062                if (ret)
1063                        goto release_pages;
1064                if (bio)
1065                        f2fs_submit_bio(sbi, bio, DATA);
1066
1067                ret = f2fs_init_compress_ctx(cc);
1068                if (ret)
1069                        goto release_pages;
1070        }
1071
1072        for (i = 0; i < cc->cluster_size; i++) {
1073                f2fs_bug_on(sbi, cc->rpages[i]);
1074
1075                page = find_lock_page(mapping, start_idx + i);
1076                f2fs_bug_on(sbi, !page);
1077
1078                f2fs_wait_on_page_writeback(page, DATA, true, true);
1079
1080                f2fs_compress_ctx_add_page(cc, page);
1081                f2fs_put_page(page, 0);
1082
1083                if (!PageUptodate(page)) {
1084                        f2fs_unlock_rpages(cc, i + 1);
1085                        f2fs_put_rpages_mapping(mapping, start_idx,
1086                                        cc->cluster_size);
1087                        f2fs_destroy_compress_ctx(cc);
1088                        goto retry;
1089                }
1090        }
1091
1092        if (prealloc) {
1093                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
1094
1095                set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1096
1097                for (i = cc->cluster_size - 1; i > 0; i--) {
1098                        ret = f2fs_get_block(&dn, start_idx + i);
1099                        if (ret) {
1100                                i = cc->cluster_size;
1101                                break;
1102                        }
1103
1104                        if (dn.data_blkaddr != NEW_ADDR)
1105                                break;
1106                }
1107
1108                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
1109        }
1110
1111        if (likely(!ret)) {
1112                *fsdata = cc->rpages;
1113                *pagep = cc->rpages[offset_in_cluster(cc, index)];
1114                return cc->cluster_size;
1115        }
1116
1117unlock_pages:
1118        f2fs_unlock_rpages(cc, i);
1119release_pages:
1120        f2fs_put_rpages_mapping(mapping, start_idx, i);
1121        f2fs_destroy_compress_ctx(cc);
1122        return ret;
1123}
1124
1125int f2fs_prepare_compress_overwrite(struct inode *inode,
1126                struct page **pagep, pgoff_t index, void **fsdata)
1127{
1128        struct compress_ctx cc = {
1129                .inode = inode,
1130                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1131                .cluster_size = F2FS_I(inode)->i_cluster_size,
1132                .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1133                .rpages = NULL,
1134                .nr_rpages = 0,
1135        };
1136
1137        return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1138}
1139
1140bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1141                                        pgoff_t index, unsigned copied)
1142
1143{
1144        struct compress_ctx cc = {
1145                .inode = inode,
1146                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1147                .cluster_size = F2FS_I(inode)->i_cluster_size,
1148                .rpages = fsdata,
1149        };
1150        bool first_index = (index == cc.rpages[0]->index);
1151
1152        if (copied)
1153                set_cluster_dirty(&cc);
1154
1155        f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1156        f2fs_destroy_compress_ctx(&cc);
1157
1158        return first_index;
1159}
1160
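     /*
      * Partial truncation inside a compressed cluster cannot simply drop
      * blocks, so read the whole cluster in, zero everything from @from to the
      * end of the cluster, and write the pages back dirty.
      */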
1161int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1162{
1163        void *fsdata = NULL;
1164        struct page *pagep;
1165        int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1166        pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1167                                                        log_cluster_size;
1168        int err;
1169
1170        err = f2fs_is_compressed_cluster(inode, start_idx);
1171        if (err < 0)
1172                return err;
1173
1174        /* truncate normal cluster */
1175        if (!err)
1176                return f2fs_do_truncate_blocks(inode, from, lock);
1177
1178        /* truncate compressed cluster */
1179        err = f2fs_prepare_compress_overwrite(inode, &pagep,
1180                                                start_idx, &fsdata);
1181
1182        /* should not be a normal cluster */
1183        f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1184
1185        if (err <= 0)
1186                return err;
1187
1188        if (err > 0) {
1189                struct page **rpages = fsdata;
1190                int cluster_size = F2FS_I(inode)->i_cluster_size;
1191                int i;
1192
1193                for (i = cluster_size - 1; i >= 0; i--) {
1194                        loff_t start = rpages[i]->index << PAGE_SHIFT;
1195
1196                        if (from <= start) {
1197                                zero_user_segment(rpages[i], 0, PAGE_SIZE);
1198                        } else {
1199                                zero_user_segment(rpages[i], from - start,
1200                                                                PAGE_SIZE);
1201                                break;
1202                        }
1203                }
1204
1205                f2fs_compress_write_end(inode, fsdata, start_idx, true);
1206        }
1207        return 0;
1208}
1209
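     /*
      * Write one cluster in compressed form.  Block 0 of the cluster becomes
      * the COMPRESS_ADDR header, the nr_cpages compressed pages are written
      * out-of-place into the following slots, and any remaining slots are
      * invalidated.  Returns -EAGAIN on failure so the caller can retry the
      * cluster as raw pages.
      */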
1210static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1211                                        int *submitted,
1212                                        struct writeback_control *wbc,
1213                                        enum iostat_type io_type)
1214{
1215        struct inode *inode = cc->inode;
1216        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1217        struct f2fs_inode_info *fi = F2FS_I(inode);
1218        struct f2fs_io_info fio = {
1219                .sbi = sbi,
1220                .ino = cc->inode->i_ino,
1221                .type = DATA,
1222                .op = REQ_OP_WRITE,
1223                .op_flags = wbc_to_write_flags(wbc),
1224                .old_blkaddr = NEW_ADDR,
1225                .page = NULL,
1226                .encrypted_page = NULL,
1227                .compressed_page = NULL,
1228                .submitted = false,
1229                .io_type = io_type,
1230                .io_wbc = wbc,
1231                .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1232        };
1233        struct dnode_of_data dn;
1234        struct node_info ni;
1235        struct compress_io_ctx *cic;
1236        pgoff_t start_idx = start_idx_of_cluster(cc);
1237        unsigned int last_index = cc->cluster_size - 1;
1238        loff_t psize;
1239        int i, err;
1240
1241        if (IS_NOQUOTA(inode)) {
1242                /*
1243                 * We need to wait for node_write to avoid block allocation during
 1244                 * checkpoint. This can only happen for quota writes, which can
 1245                 * cause the discard race condition below.
1246                 */
1247                down_read(&sbi->node_write);
1248        } else if (!f2fs_trylock_op(sbi)) {
1249                goto out_free;
1250        }
1251
1252        set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1253
1254        err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1255        if (err)
1256                goto out_unlock_op;
1257
1258        for (i = 0; i < cc->cluster_size; i++) {
1259                if (data_blkaddr(dn.inode, dn.node_page,
1260                                        dn.ofs_in_node + i) == NULL_ADDR)
1261                        goto out_put_dnode;
1262        }
1263
1264        psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1265
1266        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1267        if (err)
1268                goto out_put_dnode;
1269
1270        fio.version = ni.version;
1271
1272        cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
1273        if (!cic)
1274                goto out_put_dnode;
1275
1276        cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1277        cic->inode = inode;
1278        atomic_set(&cic->pending_pages, cc->nr_cpages);
1279        cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1280        if (!cic->rpages)
1281                goto out_put_cic;
1282
1283        cic->nr_rpages = cc->cluster_size;
1284
1285        for (i = 0; i < cc->nr_cpages; i++) {
1286                f2fs_set_compressed_page(cc->cpages[i], inode,
1287                                        cc->rpages[i + 1]->index, cic);
1288                fio.compressed_page = cc->cpages[i];
1289
1290                fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1291                                                dn.ofs_in_node + i + 1);
1292
1293                /* wait for GCed page writeback via META_MAPPING */
1294                f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1295
1296                if (fio.encrypted) {
1297                        fio.page = cc->rpages[i + 1];
1298                        err = f2fs_encrypt_one_page(&fio);
1299                        if (err)
1300                                goto out_destroy_crypt;
1301                        cc->cpages[i] = fio.encrypted_page;
1302                }
1303        }
1304
1305        set_cluster_writeback(cc);
1306
1307        for (i = 0; i < cc->cluster_size; i++)
1308                cic->rpages[i] = cc->rpages[i];
1309
1310        for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1311                block_t blkaddr;
1312
1313                blkaddr = f2fs_data_blkaddr(&dn);
1314                fio.page = cc->rpages[i];
1315                fio.old_blkaddr = blkaddr;
1316
1317                /* cluster header */
1318                if (i == 0) {
1319                        if (blkaddr == COMPRESS_ADDR)
1320                                fio.compr_blocks++;
1321                        if (__is_valid_data_blkaddr(blkaddr))
1322                                f2fs_invalidate_blocks(sbi, blkaddr);
1323                        f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1324                        goto unlock_continue;
1325                }
1326
1327                if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1328                        fio.compr_blocks++;
1329
1330                if (i > cc->nr_cpages) {
1331                        if (__is_valid_data_blkaddr(blkaddr)) {
1332                                f2fs_invalidate_blocks(sbi, blkaddr);
1333                                f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1334                        }
1335                        goto unlock_continue;
1336                }
1337
1338                f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1339
1340                if (fio.encrypted)
1341                        fio.encrypted_page = cc->cpages[i - 1];
1342                else
1343                        fio.compressed_page = cc->cpages[i - 1];
1344
1345                cc->cpages[i - 1] = NULL;
1346                f2fs_outplace_write_data(&dn, &fio);
1347                (*submitted)++;
1348unlock_continue:
1349                inode_dec_dirty_pages(cc->inode);
1350                unlock_page(fio.page);
1351        }
1352
1353        if (fio.compr_blocks)
1354                f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1355        f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
1356
1357        set_inode_flag(cc->inode, FI_APPEND_WRITE);
1358        if (cc->cluster_idx == 0)
1359                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1360
1361        f2fs_put_dnode(&dn);
1362        if (IS_NOQUOTA(inode))
1363                up_read(&sbi->node_write);
1364        else
1365                f2fs_unlock_op(sbi);
1366
1367        spin_lock(&fi->i_size_lock);
1368        if (fi->last_disk_size < psize)
1369                fi->last_disk_size = psize;
1370        spin_unlock(&fi->i_size_lock);
1371
1372        f2fs_put_rpages(cc);
1373        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1374        cc->cpages = NULL;
1375        f2fs_destroy_compress_ctx(cc);
1376        return 0;
1377
1378out_destroy_crypt:
1379        page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1380
1381        for (--i; i >= 0; i--)
1382                fscrypt_finalize_bounce_page(&cc->cpages[i]);
1383        for (i = 0; i < cc->nr_cpages; i++) {
1384                if (!cc->cpages[i])
1385                        continue;
1386                f2fs_put_page(cc->cpages[i], 1);
1387        }
1388out_put_cic:
1389        kmem_cache_free(cic_entry_slab, cic);
1390out_put_dnode:
1391        f2fs_put_dnode(&dn);
1392out_unlock_op:
1393        if (IS_NOQUOTA(inode))
1394                up_read(&sbi->node_write);
1395        else
1396                f2fs_unlock_op(sbi);
1397out_free:
1398        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1399        cc->cpages = NULL;
1400        return -EAGAIN;
1401}
1402
1403void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1404{
1405        struct f2fs_sb_info *sbi = bio->bi_private;
1406        struct compress_io_ctx *cic =
1407                        (struct compress_io_ctx *)page_private(page);
1408        int i;
1409
1410        if (unlikely(bio->bi_status))
1411                mapping_set_error(cic->inode->i_mapping, -EIO);
1412
1413        f2fs_compress_free_page(page);
1414
1415        dec_page_count(sbi, F2FS_WB_DATA);
1416
1417        if (atomic_dec_return(&cic->pending_pages))
1418                return;
1419
1420        for (i = 0; i < cic->nr_rpages; i++) {
1421                WARN_ON(!cic->rpages[i]);
1422                clear_cold_data(cic->rpages[i]);
1423                end_page_writeback(cic->rpages[i]);
1424        }
1425
1426        page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1427        kmem_cache_free(cic_entry_slab, cic);
1428}
1429
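     /*
      * Fallback path when the cluster is not (or cannot be) compressed: write
      * each dirty page of the cluster individually through
      * f2fs_write_single_data_page(), redirtying the rest on error.
      */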
1430static int f2fs_write_raw_pages(struct compress_ctx *cc,
1431                                        int *submitted,
1432                                        struct writeback_control *wbc,
1433                                        enum iostat_type io_type)
1434{
1435        struct address_space *mapping = cc->inode->i_mapping;
1436        int _submitted, compr_blocks, ret;
1437        int i = -1, err = 0;
1438
1439        compr_blocks = f2fs_compressed_blocks(cc);
1440        if (compr_blocks < 0) {
1441                err = compr_blocks;
1442                goto out_err;
1443        }
1444
1445        for (i = 0; i < cc->cluster_size; i++) {
1446                if (!cc->rpages[i])
1447                        continue;
1448retry_write:
1449                if (cc->rpages[i]->mapping != mapping) {
1450                        unlock_page(cc->rpages[i]);
1451                        continue;
1452                }
1453
1454                BUG_ON(!PageLocked(cc->rpages[i]));
1455
1456                ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1457                                                NULL, NULL, wbc, io_type,
1458                                                compr_blocks, false);
1459                if (ret) {
1460                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
1461                                unlock_page(cc->rpages[i]);
1462                                ret = 0;
1463                        } else if (ret == -EAGAIN) {
1464                                /*
 1465                                 * for a quota file, just redirty the remaining
 1466                                 * pages to avoid a deadlock caused by a cluster
 1467                                 * update race with a foreground operation.
1468                                 */
1469                                if (IS_NOQUOTA(cc->inode)) {
1470                                        err = 0;
1471                                        goto out_err;
1472                                }
1473                                ret = 0;
1474                                cond_resched();
1475                                congestion_wait(BLK_RW_ASYNC,
1476                                                DEFAULT_IO_TIMEOUT);
1477                                lock_page(cc->rpages[i]);
1478
1479                                if (!PageDirty(cc->rpages[i])) {
1480                                        unlock_page(cc->rpages[i]);
1481                                        continue;
1482                                }
1483
1484                                clear_page_dirty_for_io(cc->rpages[i]);
1485                                goto retry_write;
1486                        }
1487                        err = ret;
1488                        goto out_err;
1489                }
1490
1491                *submitted += _submitted;
1492        }
1493
1494        f2fs_balance_fs(F2FS_M_SB(mapping), true);
1495
1496        return 0;
1497out_err:
1498        for (++i; i < cc->cluster_size; i++) {
1499                if (!cc->rpages[i])
1500                        continue;
1501                redirty_page_for_writepage(wbc, cc->rpages[i]);
1502                unlock_page(cc->rpages[i]);
1503        }
1504        return err;
1505}
1506
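     /*
      * Writeback entry point for a full cluster: try to compress it first and
      * write the compressed form; on -EAGAIN from either step, fall back to
      * writing the raw pages.
      */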
1507int f2fs_write_multi_pages(struct compress_ctx *cc,
1508                                        int *submitted,
1509                                        struct writeback_control *wbc,
1510                                        enum iostat_type io_type)
1511{
1512        int err;
1513
1514        *submitted = 0;
1515        if (cluster_may_compress(cc)) {
1516                err = f2fs_compress_pages(cc);
1517                if (err == -EAGAIN) {
1518                        goto write;
1519                } else if (err) {
1520                        f2fs_put_rpages_wbc(cc, wbc, true, 1);
1521                        goto destroy_out;
1522                }
1523
1524                err = f2fs_write_compressed_pages(cc, submitted,
1525                                                        wbc, io_type);
1526                if (!err)
1527                        return 0;
1528                f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1529        }
1530write:
1531        f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1532
1533        err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1534        f2fs_put_rpages_wbc(cc, wbc, false, 0);
1535destroy_out:
1536        f2fs_destroy_compress_ctx(cc);
1537        return err;
1538}
1539
1540static void f2fs_free_dic(struct decompress_io_ctx *dic);
1541
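     /*
      * Allocate and initialise a decompress_io_ctx for reading one compressed
      * cluster: copy the target rpages from the compress_ctx and allocate one
      * locked cpage per compressed block to receive the on-disk data.
      */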
1542struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1543{
1544        struct decompress_io_ctx *dic;
1545        pgoff_t start_idx = start_idx_of_cluster(cc);
1546        int i;
1547
1548        dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
1549        if (!dic)
1550                return ERR_PTR(-ENOMEM);
1551
1552        dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1553        if (!dic->rpages) {
1554                kmem_cache_free(dic_entry_slab, dic);
1555                return ERR_PTR(-ENOMEM);
1556        }
1557
1558        dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1559        dic->inode = cc->inode;
1560        atomic_set(&dic->remaining_pages, cc->nr_cpages);
1561        dic->cluster_idx = cc->cluster_idx;
1562        dic->cluster_size = cc->cluster_size;
1563        dic->log_cluster_size = cc->log_cluster_size;
1564        dic->nr_cpages = cc->nr_cpages;
1565        refcount_set(&dic->refcnt, 1);
1566        dic->failed = false;
1567        dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1568
1569        for (i = 0; i < dic->cluster_size; i++)
1570                dic->rpages[i] = cc->rpages[i];
1571        dic->nr_rpages = cc->cluster_size;
1572
1573        dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1574        if (!dic->cpages)
1575                goto out_free;
1576
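            /*
             * Each compressed page is tagged with the dic via page_private()
             * (see f2fs_set_compressed_page()), so the read completion path
             * can recognize it with f2fs_is_compressed_page().
             */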
1577        for (i = 0; i < dic->nr_cpages; i++) {
1578                struct page *page;
1579
1580                page = f2fs_compress_alloc_page();
1581                if (!page)
1582                        goto out_free;
1583
1584                f2fs_set_compressed_page(page, cc->inode,
1585                                        start_idx + i + 1, dic);
1586                dic->cpages[i] = page;
1587        }
1588
1589        return dic;
1590
1591out_free:
1592        f2fs_free_dic(dic);
1593        return ERR_PTR(-ENOMEM);
1594}
1595
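    /*
     * Free everything owned by @dic: the temporary pages, the compressed
     * pages, the page pointer arrays and the context itself.
     */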
1596static void f2fs_free_dic(struct decompress_io_ctx *dic)
1597{
1598        int i;
1599
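            /*
             * tpages slots that alias pagecache pages (rpages) are not owned
             * here; only free the temporary pages that were allocated for
             * missing slots in the cluster.
             */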
1600        if (dic->tpages) {
1601                for (i = 0; i < dic->cluster_size; i++) {
1602                        if (dic->rpages[i])
1603                                continue;
1604                        if (!dic->tpages[i])
1605                                continue;
1606                        f2fs_compress_free_page(dic->tpages[i]);
1607                }
1608                page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1609        }
1610
1611        if (dic->cpages) {
1612                for (i = 0; i < dic->nr_cpages; i++) {
1613                        if (!dic->cpages[i])
1614                                continue;
1615                        f2fs_compress_free_page(dic->cpages[i]);
1616                }
1617                page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1618        }
1619
1620        page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1621        kmem_cache_free(dic_entry_slab, dic);
1622}
1623
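    /* Drop a reference to @dic and free it once the last reference is put. */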
1624static void f2fs_put_dic(struct decompress_io_ctx *dic)
1625{
1626        if (refcount_dec_and_test(&dic->refcnt))
1627                f2fs_free_dic(dic);
1628}
1629
1630/*
1631 * Update and unlock the cluster's pagecache pages, and release the
1632 * reference to the decompress_io_ctx that was being held for I/O completion.
1633 */
1634static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1635{
1636        int i;
1637
1638        for (i = 0; i < dic->cluster_size; i++) {
1639                struct page *rpage = dic->rpages[i];
1640
1641                if (!rpage)
1642                        continue;
1643
1644                /* PG_error was set if verity failed. */
1645                if (failed || PageError(rpage)) {
1646                        ClearPageUptodate(rpage);
1647                        /* will re-read again later */
1648                        ClearPageError(rpage);
1649                } else {
1650                        SetPageUptodate(rpage);
1651                }
1652                unlock_page(rpage);
1653        }
1654
1655        f2fs_put_dic(dic);
1656}
1657
1658static void f2fs_verify_cluster(struct work_struct *work)
1659{
1660        struct decompress_io_ctx *dic =
1661                container_of(work, struct decompress_io_ctx, verity_work);
1662        int i;
1663
1664        /* Verify the cluster's decompressed pages with fs-verity. */
1665        for (i = 0; i < dic->cluster_size; i++) {
1666                struct page *rpage = dic->rpages[i];
1667
1668                if (rpage && !fsverity_verify_page(rpage))
1669                        SetPageError(rpage);
1670        }
1671
1672        __f2fs_decompress_end_io(dic, false);
1673}
1674
1675/*
1676 * This is called when a compressed cluster has been decompressed
1677 * (or failed to be read and/or decompressed).
1678 */
1679void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1680{
1681        if (!failed && dic->need_verity) {
1682                /*
1683                 * Note that to avoid deadlocks, the verity work can't be done
1684                 * on the decompression workqueue.  This is because verifying
1685                 * the data pages can involve reading metadata pages from the
1686                 * file, and these metadata pages may be compressed.
1687                 */
1688                INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1689                fsverity_enqueue_verify_work(&dic->verity_work);
1690        } else {
1691                __f2fs_decompress_end_io(dic, failed);
1692        }
1693}
1694
1695/*
1696 * Put a reference to a compressed page's decompress_io_ctx.
1697 *
1698 * This is called when the page is no longer needed and can be freed.
1699 */
1700void f2fs_put_page_dic(struct page *page)
1701{
1702        struct decompress_io_ctx *dic =
1703                        (struct decompress_io_ctx *)page_private(page);
1704
1705        f2fs_put_dic(dic);
1706}
1707
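    /*
     * Create the per-superblock slab that backs page_array_alloc() for page
     * pointer arrays up to one cluster in size; larger requests fall back to
     * f2fs_kzalloc().
     */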
1708int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1709{
1710        dev_t dev = sbi->sb->s_bdev->bd_dev;
1711        char slab_name[32];
1712
1713        sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1714
1715        sbi->page_array_slab_size = sizeof(struct page *) <<
1716                                        F2FS_OPTION(sbi).compress_log_size;
1717
1718        sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1719                                        sbi->page_array_slab_size);
1720        if (!sbi->page_array_slab)
1721                return -ENOMEM;
1722        return 0;
1723}
1724
1725void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1726{
1727        kmem_cache_destroy(sbi->page_array_slab);
1728}
1729
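    /*
     * The cic/dic slabs below are module-global and shared by every
     * mounted f2fs instance.
     */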
1730static int __init f2fs_init_cic_cache(void)
1731{
1732        cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1733                                        sizeof(struct compress_io_ctx));
1734        if (!cic_entry_slab)
1735                return -ENOMEM;
1736        return 0;
1737}
1738
1739static void f2fs_destroy_cic_cache(void)
1740{
1741        kmem_cache_destroy(cic_entry_slab);
1742}
1743
1744static int __init f2fs_init_dic_cache(void)
1745{
1746        dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1747                                        sizeof(struct decompress_io_ctx));
1748        if (!dic_entry_slab)
1749                return -ENOMEM;
1750        return 0;
1751}
1752
1753static void f2fs_destroy_dic_cache(void)
1754{
1755        kmem_cache_destroy(dic_entry_slab);
1756}
1757
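    /*
     * Create both context slabs at module init; if the dic cache cannot be
     * created, tear the cic cache back down.
     */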
1758int __init f2fs_init_compress_cache(void)
1759{
1760        int err;
1761
1762        err = f2fs_init_cic_cache();
1763        if (err)
1764                goto out;
1765        err = f2fs_init_dic_cache();
1766        if (err)
1767                goto free_cic;
1768        return 0;
1769free_cic:
1770        f2fs_destroy_cic_cache();
1771out:
1772        return -ENOMEM;
1773}
1774
1775void f2fs_destroy_compress_cache(void)
1776{
1777        f2fs_destroy_dic_cache();
1778        f2fs_destroy_cic_cache();
1779}
1780