linux/fs/f2fs/compress.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

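/*
 * Page-pointer arrays sized for one cluster come from a per-sb slab
 * (page_array_slab); larger requests fall back to f2fs_kzalloc(). Both
 * paths return zeroed memory, so callers can test slots against NULL
 * without initializing them.
 */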
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

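/*
 * Cluster geometry helpers. With 2^2 = 4-page clusters, for example,
 * page index 13 maps to cluster_idx 3 (13 >> 2) and offset_in_cluster
 * 1 (13 & 3), while start_idx_of_cluster() maps cluster_idx 3 back to
 * page index 12 (3 << 2).
 */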
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	/*
	 * page->private may be set to a pid; pid_max is enough to
	 * check whether the page is traced.
	 */
	if (IS_IO_TRACED_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

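		/*
		 * Drop two references: the one find_get_page() just took
		 * and the extra one the caller kept to pin the page in
		 * the page cache.
		 */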
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) for the
	 * worst-case compression, because the lz4 compressor handles the
	 * output budget by itself.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * A non-zero return means compressed data remains in the
	 * intermediate buffer because there is no more space in
	 * cbuf.cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

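/*
 * Per-algorithm ops, indexed by F2FS_I(inode)->i_compress_algorithm
 * (COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE). A NULL
 * slot means the algorithm was compiled out, which
 * f2fs_is_compress_backend_ready() reports to callers.
 */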
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

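/*
 * vm_map_ram() can fail transiently when the vmap address space is
 * fragmented; vm_unmap_aliases() flushes lazily-freed mappings and
 * reclaims address space, so retry a few times before giving up.
 */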
static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

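/*
 * Compress one cluster: map rpages and cpages contiguously with
 * f2fs_vmap(), run the per-algorithm compressor, then fill in the
 * on-disk header (clen, optional checksum, reserved words) and free
 * the tail cpages that are no longer needed. Returns -EAGAIN when the
 * result would not save at least one block, in which case the caller
 * falls back to writing the raw pages.
 */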
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

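/*
 * Read completion for one compressed page. Only the final completion
 * (pending_pages dropping to zero) performs the decompression: tpages
 * supply targets for cluster slots that have no rpage, the optional
 * checksum is verified, and the raw pages are finished through
 * f2fs_decompress_end_io() unless fs-verity still needs to see them.
 */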
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (atomic_dec_return(&dic->pending_pages))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_free_dic;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

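/*
 * Walk the cluster's block addresses in its dnode. A COMPRESS_ADDR in
 * slot 0 marks a compressed cluster; the returned count then covers
 * that header slot plus, depending on @compr, either valid data
 * blocks (compressed blocks) or any non-NULL blocks (valid blocks).
 * Returns 0 for a cluster that is not compressed or not allocated.
 */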
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

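/*
 * Pin every page of the cluster in the page cache and read in any
 * that are not uptodate, so a partial overwrite sees the decompressed
 * old data. For a partially-allocated cluster, preallocate the
 * missing blocks back-to-front. On success, the whole cluster is
 * returned locked through *fsdata and *pagep.
 */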
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

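/*
 * Write one compressed cluster: slot 0 of the cluster is stamped with
 * the COMPRESS_ADDR marker, the cpages go out through outplace writes
 * (through bounce pages first when fs-layer crypto is enabled), and
 * block slots beyond nr_cpages are invalidated back to NEW_ADDR. Any
 * failure returns -EAGAIN so the caller can retry the cluster as raw,
 * uncompressed writes.
 */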
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen for quota writes,
		 * which can cause the discard race condition below.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

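/*
 * Fallback path for clusters that are not being compressed: write
 * each dirty rpage individually, passing the cluster's current
 * compressed-block count down so the lower layers can release
 * previously compressed blocks and keep i_compr_blocks consistent.
 */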
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * For quota files, just redirty the remaining
				 * pages to avoid a deadlock caused by a cluster
				 * update race with a foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

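/*
 * Build the decompress context for a read. The rpages come from the
 * caller's compress_ctx, while the cpages (one per compressed block,
 * indexed just past the COMPRESS_ADDR header at start_idx) are freshly
 * allocated from the mempool to receive the on-disk data.
 */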
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->pending_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}

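/*
 * The per-sb page-array slab is sized for exactly one cluster worth of
 * page pointers (see page_array_alloc()); the slab name embeds the
 * device numbers so multiple f2fs instances get distinct slabs.
 */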
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}