linux/fs/f2fs/data.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE      NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
        if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
                                        0, BIOSET_NEED_BVECS))
                return -ENOMEM;
        return 0;
}

void f2fs_destroy_bioset(void)
{
        bioset_exit(&f2fs_bioset);
}

static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
                                                unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
}

struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
{
        if (noio) {
                /* No failure on bio allocation */
                return __f2fs_bio_alloc(GFP_NOIO, npages);
        }

        if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
                f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
                return NULL;
        }

        return __f2fs_bio_alloc(GFP_KERNEL, npages);
}

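/*
 * Return true if writeback of @page is guaranteed to be persisted by a
 * checkpoint: meta/node pages, dentry pages, data of atomic or quota files,
 * and pages marked as cold data.
 */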
static bool __is_cp_guaranteed(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;
        struct f2fs_sb_info *sbi;

        if (!mapping)
                return false;

        if (f2fs_is_compressed_page(page))
                return false;

        inode = mapping->host;
        sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_META_INO(sbi) ||
                        inode->i_ino == F2FS_NODE_INO(sbi) ||
                        S_ISDIR(inode->i_mode) ||
                        (S_ISREG(inode->i_mode) &&
                        (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
                        is_cold_data(page))
                return true;
        return false;
}

static enum count_type __read_io_type(struct page *page)
{
        struct address_space *mapping = page_file_mapping(page);

        if (mapping) {
                struct inode *inode = mapping->host;
                struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                if (inode->i_ino == F2FS_META_INO(sbi))
                        return F2FS_RD_META;

                if (inode->i_ino == F2FS_NODE_INO(sbi))
                        return F2FS_RD_NODE;
        }
        return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_DECRYPT,
        STEP_DECOMPRESS_NOWQ,           /* handle normal cluster data in place */
        STEP_DECOMPRESS,                /* handle compressed cluster data in workqueue */
        STEP_VERITY,
};

struct bio_post_read_ctx {
        struct bio *bio;
        struct f2fs_sb_info *sbi;
        struct work_struct work;
        unsigned int enabled_steps;
};

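/*
 * Complete each page of a read bio: hand compressed pages to the
 * decompressor when requested, then set or clear PG_uptodate according to
 * the bio status and PG_error, drop the in-flight read count, and unlock.
 */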
static void __read_end_io(struct bio *bio, bool compr, bool verity)
{
        struct page *page;
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                page = bv->bv_page;

#ifdef CONFIG_F2FS_FS_COMPRESSION
                if (compr && f2fs_is_compressed_page(page)) {
                        f2fs_decompress_pages(bio, page, verity);
                        continue;
                }
                if (verity)
                        continue;
#endif

                /* PG_error was set if any post_read step failed */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will be re-read later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                dec_page_count(F2FS_P_SB(page), __read_io_type(page));
                unlock_page(page);
        }
}

static void f2fs_release_read_bio(struct bio *bio);
static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
{
        if (!compr)
                __read_end_io(bio, false, verity);
        f2fs_release_read_bio(bio);
}

static void f2fs_decompress_bio(struct bio *bio, bool verity)
{
        __read_end_io(bio, true, verity);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
{
        fscrypt_decrypt_bio(ctx->bio);
}

static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
{
        f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
{
        f2fs_decompress_end_io(rpages, cluster_size, false, true);
}

static void f2fs_verify_bio(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                struct page *page = bv->bv_page;
                struct decompress_io_ctx *dic;

                dic = (struct decompress_io_ctx *)page_private(page);

                if (dic) {
                        if (refcount_dec_not_one(&dic->ref))
                                continue;
                        f2fs_verify_pages(dic->rpages,
                                                dic->cluster_size);
                        f2fs_free_dic(dic);
                        continue;
                }

                if (bio->bi_status || PageError(page))
                        goto clear_uptodate;

                if (fsverity_verify_page(page)) {
                        SetPageUptodate(page);
                        goto unlock;
                }
clear_uptodate:
                ClearPageUptodate(page);
                ClearPageError(page);
unlock:
                dec_page_count(F2FS_P_SB(page), __read_io_type(page));
                unlock_page(page);
        }
}
#endif

static void f2fs_verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;
#ifdef CONFIG_F2FS_FS_COMPRESSION
        unsigned int enabled_steps = ctx->enabled_steps;
#endif

        /*
         * fsverity_verify_bio() may call readpages() again, and while verity
         * will be disabled for this, decryption may still be needed, resulting
         * in another bio_post_read_ctx being allocated.  So to prevent
         * deadlocks we need to release the current ctx to the mempool first.
         * This assumes that verity is the last post-read step.
         */
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /* previous step is decompression */
        if (enabled_steps & (1 << STEP_DECOMPRESS)) {
                f2fs_verify_bio(bio);
                f2fs_release_read_bio(bio);
                return;
        }
#endif

        fsverity_verify_bio(bio);
        __f2fs_read_end_io(bio, false, false);
}

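/*
 * Run the decryption and/or decompression steps from the post-read
 * workqueue; if verity is also enabled, chain the remaining work onto the
 * fs-verity workqueue instead of finishing the bio here.
 */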
static void f2fs_post_read_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        if (ctx->enabled_steps & (1 << STEP_DECRYPT))
                f2fs_decrypt_work(ctx);

        if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
                f2fs_decompress_work(ctx);

        if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                INIT_WORK(&ctx->work, f2fs_verity_work);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }

        __f2fs_read_end_io(ctx->bio,
                ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
}

static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
                                                struct work_struct *work)
{
        queue_work(sbi->post_read_wq, work);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */

        if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
                ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
                INIT_WORK(&ctx->work, f2fs_post_read_work);
                f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
                return;
        }

        if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                INIT_WORK(&ctx->work, f2fs_verity_work);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }

        __f2fs_read_end_io(ctx->bio, false, false);
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
        return bio->bi_private;
}

static void f2fs_read_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));

        if (time_to_inject(sbi, FAULT_READ_IO)) {
                f2fs_show_injection_info(sbi, FAULT_READ_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        if (f2fs_bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                bio_post_read_processing(ctx);
                return;
        }

        __f2fs_read_end_io(bio, false, false);
}

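/*
 * Write-side bio completion: release dummy pages used for IO alignment,
 * finalize encryption bounce pages, hand compressed pages back to the
 * compression layer, record IO errors (stopping checkpoints for CP data),
 * and end page writeback.
 */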
static void f2fs_write_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi = bio->bi_private;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (time_to_inject(sbi, FAULT_WRITE_IO)) {
                f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                enum count_type type = WB_DATA_TYPE(page);

                if (IS_DUMMY_WRITTEN_PAGE(page)) {
                        set_page_private(page, (unsigned long)NULL);
                        ClearPagePrivate(page);
                        unlock_page(page);
                        mempool_free(page, sbi->write_io_dummy);

                        if (unlikely(bio->bi_status))
                                f2fs_stop_checkpoint(sbi, true);
                        continue;
                }

                fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
                if (f2fs_is_compressed_page(page)) {
                        f2fs_compress_write_end_io(bio, page);
                        continue;
                }
#endif

                if (unlikely(bio->bi_status)) {
                        mapping_set_error(page->mapping, -EIO);
                        if (type == F2FS_WB_CP_DATA)
                                f2fs_stop_checkpoint(sbi, true);
                }

                f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
                                        page->index != nid_of_node(page));

                dec_page_count(sbi, type);
                if (f2fs_in_warm_node_list(sbi, page))
                        f2fs_del_fsync_node_entry(sbi, page);
                clear_cold_data(page);
                end_page_writeback(page);
        }
        if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
                                wq_has_sleeper(&sbi->cp_wait))
                wake_up(&sbi->cp_wait);

        bio_put(bio);
}

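/*
 * Map @blk_addr to the device that covers it on a multi-device volume and,
 * if @bio is given, point the bio at that device and sector.
 */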
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        int i;

        if (f2fs_is_multi_device(sbi)) {
                for (i = 0; i < sbi->s_ndevs; i++) {
                        if (FDEV(i).start_blk <= blk_addr &&
                            FDEV(i).end_blk >= blk_addr) {
                                blk_addr -= FDEV(i).start_blk;
                                bdev = FDEV(i).bdev;
                                break;
                        }
                }
        }
        if (bio) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        }
        return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return 0;

        for (i = 0; i < sbi->s_ndevs; i++)
                if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
                        return i;
        return 0;
}

/*
 * Return true if the bio's bdev is the same as its target device.
 */
static bool __same_bdev(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
{
        struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
        return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        struct bio *bio;

        bio = f2fs_bio_alloc(sbi, npages, true);

        f2fs_target_device(sbi, fio->new_blkaddr, bio);
        if (is_read_io(fio->op)) {
                bio->bi_end_io = f2fs_read_end_io;
                bio->bi_private = NULL;
        } else {
                bio->bi_end_io = f2fs_write_end_io;
                bio->bi_private = sbi;
                bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
                                                fio->type, fio->temp);
        }
        if (fio->io_wbc)
                wbc_init_bio(fio->io_wbc, bio);

        return bio;
}

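/*
 * Submit a bio; when IO alignment is enabled, pad partially filled DATA and
 * NODE write bios with zero-filled dummy pages up to F2FS_IO_SIZE blocks.
 */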
static inline void __submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        if (!is_read_io(bio_op(bio))) {
                unsigned int start;

                if (type != DATA && type != NODE)
                        goto submit_io;

                if (f2fs_lfs_mode(sbi) && current->plug)
                        blk_finish_plug(current->plug);

                if (!F2FS_IO_ALIGNED(sbi))
                        goto submit_io;

                start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
                start %= F2FS_IO_SIZE(sbi);

                if (start == 0)
                        goto submit_io;

                /* fill dummy pages */
                for (; start < F2FS_IO_SIZE(sbi); start++) {
                        struct page *page =
                                mempool_alloc(sbi->write_io_dummy,
                                              GFP_NOIO | __GFP_NOFAIL);
                        f2fs_bug_on(sbi, !page);

                        zero_user_segment(page, 0, PAGE_SIZE);
                        SetPagePrivate(page);
                        set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
                        lock_page(page);
                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                                f2fs_bug_on(sbi, 1);
                }
                /*
                 * In the NODE case, we lose the next block address chain.
                 * So, we need to do a checkpoint in f2fs_sync_file.
                 */
                if (type == NODE)
                        set_sbi_flag(sbi, SBI_NEED_CP);
        }
submit_io:
        if (is_read_io(bio_op(bio)))
                trace_f2fs_submit_read_bio(sbi->sb, type, bio);
        else
                trace_f2fs_submit_write_bio(sbi->sb, type, bio);
        submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        __submit_bio(sbi, bio, type);
}

static void __attach_io_flag(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
        unsigned int io_flag, fua_flag, meta_flag;

        if (fio->type == DATA)
                io_flag = sbi->data_io_flag;
        else if (fio->type == NODE)
                io_flag = sbi->node_io_flag;
        else
                return;

        fua_flag = io_flag & temp_mask;
        meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

        /*
         * data/node io flag bits per temp:
         *      REQ_META     |      REQ_FUA      |
         *    5 |    4 |   3 |    2 |    1 |   0 |
         * Cold | Warm | Hot | Cold | Warm | Hot |
         */
        if ((1 << fio->temp) & meta_flag)
                fio->op_flags |= REQ_META;
        if ((1 << fio->temp) & fua_flag)
                fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
        struct f2fs_io_info *fio = &io->fio;

        if (!io->bio)
                return;

        __attach_io_flag(fio);
        bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

        if (is_read_io(fio->op))
                trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
        else
                trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

        __submit_bio(io->sbi, io->bio, fio->type);
        io->bio = NULL;
}

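/*
 * Check whether a pending bio contains any page matching the given inode,
 * page, or node ino; with no filter, any non-empty bio matches.
 */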
static bool __has_merged_page(struct bio *bio, struct inode *inode,
                                                struct page *page, nid_t ino)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (!bio)
                return false;

        if (!inode && !page && !ino)
                return true;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *target = bvec->bv_page;

                if (fscrypt_is_bounce_page(target)) {
                        target = fscrypt_pagecache_page(target);
                        if (IS_ERR(target))
                                continue;
                }
                if (f2fs_is_compressed_page(target)) {
                        target = f2fs_compress_control_page(target);
                        if (IS_ERR(target))
                                continue;
                }

                if (inode && inode == target->mapping->host)
                        return true;
                if (page && page == target)
                        return true;
                if (ino && ino == ino_of_node(target))
                        return true;
        }

        return false;
}

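/*
 * Flush the merged write bio of the given type/temp pair; META_FLUSH
 * requests get flush/FUA flags attached for the checkpoint path.
 */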
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
                                enum page_type type, enum temp_type temp)
{
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

        down_write(&io->io_rwsem);

        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                io->fio.op = REQ_OP_WRITE;
                io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
                if (!test_opt(sbi, NOBARRIER))
                        io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
        }
        __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type, bool force)
{
        enum temp_type temp;
        bool ret = true;

        for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
                if (!force) {
                        enum page_type btype = PAGE_TYPE_OF_BIO(type);
                        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

                        down_read(&io->io_rwsem);
                        ret = __has_merged_page(io->bio, inode, page, ino);
                        up_read(&io->io_rwsem);
                }
                if (ret)
                        __f2fs_submit_merged_write(sbi, type, temp);

                /* TODO: for now, use HOT temp only for meta pages. */
                if (type >= META)
                        break;
        }
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
        __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type)
{
        __submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
        f2fs_submit_merged_write(sbi, DATA);
        f2fs_submit_merged_write(sbi, NODE);
        f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        fio->is_por ? META_POR : (__is_meta_io(fio) ?
                        META_GENERIC : DATA_GENERIC_ENHANCE)))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);

        /* Allocate a new bio */
        bio = __bio_alloc(fio, 1);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }

        if (fio->io_wbc && !is_read_io(fio->op))
                wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

        __attach_io_flag(fio);
        bio_set_op_attrs(bio, fio->op, fio->op_flags);

        inc_page_count(fio->sbi, is_read_io(fio->op) ?
                        __read_io_type(page): WB_DATA_TYPE(fio->page));

        __submit_bio(fio->sbi, bio, fio->type);
        return 0;
}

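/* A page can be merged only if it follows the last block on the same bdev. */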
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                block_t last_blkaddr, block_t cur_blkaddr)
{
        if (last_blkaddr + 1 != cur_blkaddr)
                return false;
        return __same_bdev(sbi, cur_blkaddr, bio);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
                                                struct f2fs_io_info *fio)
{
        if (io->fio.op != fio->op)
                return false;
        return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                        struct f2fs_bio_info *io,
                                        struct f2fs_io_info *fio,
                                        block_t last_blkaddr,
                                        block_t cur_blkaddr)
{
        if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
                unsigned int filled_blocks =
                                F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
                unsigned int io_size = F2FS_IO_SIZE(sbi);
                unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

                /* IOs in the bio are aligned and the remaining vector space is not enough */
                if (!(filled_blocks % io_size) && left_vecs < io_size)
                        return false;
        }
        if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
                return false;
        return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
                                struct page *page, enum temp_type temp)
{
        struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
        struct bio_entry *be;

        be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
        be->bio = bio;
        bio_get(bio);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
                f2fs_bug_on(sbi, 1);

        down_write(&io->bio_list_lock);
        list_add_tail(&be->list, &io->bio_list);
        up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
        list_del(&be->list);
        kmem_cache_free(bio_entry_slab, be);
}

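/*
 * Try to add @page to the tracked in-place-update bio; if the bio is full,
 * submit it and return -EAGAIN so the caller allocates a new one.
 */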
static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
                                                        struct page *page)
{
        enum temp_type temp;
        bool found = false;
        int ret = -EAGAIN;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (be->bio != *bio)
                                continue;

                        found = true;

                        if (bio_add_page(*bio, page, PAGE_SIZE, 0) ==
                                                        PAGE_SIZE) {
                                ret = 0;
                                break;
                        }

                        /* bio is full */
                        del_bio_entry(be);
                        __submit_bio(sbi, *bio, DATA);
                        break;
                }
                up_write(&io->bio_list_lock);
        }

        if (ret) {
                bio_put(*bio);
                *bio = NULL;
        }

        return ret;
}

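/*
 * Find and submit the tracked IPU bio that either matches *@bio or contains
 * @page, dropping its bio_entry from the per-temp list first.
 */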
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
                                        struct bio **bio, struct page *page)
{
        enum temp_type temp;
        bool found = false;
        struct bio *target = bio ? *bio : NULL;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                if (list_empty(head))
                        continue;

                down_read(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found)
                                break;
                }
                up_read(&io->bio_list_lock);

                if (!found)
                        continue;

                found = false;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found) {
                                target = be->bio;
                                del_bio_entry(be);
                                break;
                        }
                }
                up_write(&io->bio_list_lock);
        }

        if (found)
                __submit_bio(sbi, target, DATA);
        if (bio && *bio) {
                bio_put(*bio);
                *bio = NULL;
        }
}

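/* Merge a single page into a caller-held bio, used for in-place updates. */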
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio = *fio->bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);

        if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
                                                fio->new_blkaddr))
                f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
        if (!bio) {
                bio = __bio_alloc(fio, BIO_MAX_PAGES);
                __attach_io_flag(fio);
                bio_set_op_attrs(bio, fio->op, fio->op_flags);

                add_bio_entry(fio->sbi, bio, page, fio->temp);
        } else {
                if (add_ipu_page(fio->sbi, &bio, page))
                        goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

        inc_page_count(fio->sbi, WB_DATA_TYPE(page));

        *fio->last_block = fio->new_blkaddr;
        *fio->bio = bio;

        return 0;
}

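/*
 * Queue a page into the per-type/temp merged write bio, flushing the bio
 * first whenever the new block cannot be merged into it.
 */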
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
        struct page *bio_page;

        f2fs_bug_on(sbi, is_read_io(fio->op));

        down_write(&io->io_rwsem);
next:
        if (fio->in_list) {
                spin_lock(&io->io_lock);
                if (list_empty(&io->io_list)) {
                        spin_unlock(&io->io_lock);
                        goto out;
                }
                fio = list_first_entry(&io->io_list,
                                                struct f2fs_io_info, list);
                list_del(&fio->list);
                spin_unlock(&io->io_lock);
        }

        verify_fio_blkaddr(fio);

        if (fio->encrypted_page)
                bio_page = fio->encrypted_page;
        else if (fio->compressed_page)
                bio_page = fio->compressed_page;
        else
                bio_page = fio->page;

        /* set submitted = true as a return value */
        fio->submitted = true;

        inc_page_count(sbi, WB_DATA_TYPE(bio_page));

        if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio,
                        io->last_block_in_bio, fio->new_blkaddr))
                __submit_merged_bio(io);
alloc_new:
        if (io->bio == NULL) {
                if (F2FS_IO_ALIGNED(sbi) &&
                                (fio->type == DATA || fio->type == NODE) &&
                                fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
                        dec_page_count(sbi, WB_DATA_TYPE(bio_page));
                        fio->retry = true;
                        goto skip;
                }
                io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
                io->fio = *fio;
        }

        if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

        io->last_block_in_bio = fio->new_blkaddr;
        f2fs_trace_ios(fio, 0);

        trace_f2fs_submit_page_write(fio->page, fio);
skip:
        if (fio->in_list)
                goto next;
out:
        if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
                                !f2fs_is_checkpoint_ready(sbi))
                __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

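/*
 * Allocate a read bio targeted at @blkaddr and attach a post-read context
 * when decryption, decompression, or verity will be needed on completion.
 */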
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
                                      unsigned nr_pages, unsigned op_flag,
                                      pgoff_t first_idx, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
        struct bio_post_read_ctx *ctx;
        unsigned int post_read_steps = 0;

        bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
                                                                for_write);
        if (!bio)
                return ERR_PTR(-ENOMEM);
        f2fs_target_device(sbi, blkaddr, bio);
        bio->bi_end_io = f2fs_read_end_io;
        bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

        if (f2fs_encrypted_file(inode))
                post_read_steps |= 1 << STEP_DECRYPT;
        if (f2fs_compressed_file(inode))
                post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
        if (f2fs_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                /* Due to the mempool, this never fails. */
                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
                ctx->bio = bio;
                ctx->sbi = sbi;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }

        return bio;
}

static void f2fs_release_read_bio(struct bio *bio)
{
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
                                                block_t blkaddr, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;

        bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, blkaddr);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
        ClearPageError(page);
        inc_page_count(sbi, F2FS_RD_DATA);
        f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
        __submit_bio(sbi, bio, DATA);
        return 0;
}

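/* Write dn->data_blkaddr into the raw node page at dn->ofs_in_node. */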
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
        struct f2fs_node *rn = F2FS_NODE(dn->node_page);
        __le32 *addr_array;
        int base = 0;

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
        __set_data_blkaddr(dn);
        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
        dn->data_blkaddr = blkaddr;
        f2fs_set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        int err;

        if (!count)
                return 0;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;
        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

        trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
                                                dn->ofs_in_node, count);

        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

        for (; count > 0; dn->ofs_in_node++) {
                block_t blkaddr = f2fs_data_blkaddr(dn);
                if (blkaddr == NULL_ADDR) {
                        dn->data_blkaddr = NEW_ADDR;
                        __set_data_blkaddr(dn);
                        count--;
                }
        }

        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
        return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
        unsigned int ofs_in_node = dn->ofs_in_node;
        int ret;

        ret = f2fs_reserve_new_blocks(dn, 1);
        dn->ofs_in_node = ofs_in_node;
        return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
        bool need_put = dn->inode_page ? false : true;
        int err;

        err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
        if (err)
                return err;

        if (dn->data_blkaddr == NULL_ADDR)
                err = f2fs_reserve_new_block(dn);
        if (err || need_put)
                f2fs_put_dnode(dn);
        return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
        struct extent_info ei = {0, 0, 0};
        struct inode *inode = dn->inode;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn->data_blkaddr = ei.blk + index - ei.fofs;
                return 0;
        }

        return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
                                                int op_flags, bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, 0, 0};
        int err;

        page = f2fs_grab_cache_page(mapping, index, for_write);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ)) {
                        err = -EFSCORRUPTED;
                        goto put_err;
                }
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_err;
        f2fs_put_dnode(&dn);

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                err = -ENOENT;
                goto put_err;
        }
        if (dn.data_blkaddr != NEW_ADDR &&
                        !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                                dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE)) {
                err = -EFSCORRUPTED;
                goto put_err;
        }
got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        /*
         * A new dentry page is allocated but not able to be written, since its
         * new inode page couldn't be allocated due to -ENOSPC.
         * In such a case, its blkaddr can remain NEW_ADDR.
         * See f2fs_add_link -> f2fs_get_new_data_page ->
         * f2fs_init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
                unlock_page(page);
                return page;
        }

        err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write);
        if (err)
                goto put_err;
        return page;

put_err:
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        page = f2fs_get_read_data_page(inode, index, 0, false);
        if (IS_ERR(page))
                return page;

        if (PageUptodate(page))
                return page;

        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 0);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * If it tries to access a hole, return an error because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
                                                        bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
repeat:
        page = f2fs_get_read_data_page(inode, index, 0, for_write);
        if (IS_ERR(page))
                return page;

        /* wait for read completion */
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
                struct page *ipage, pgoff_t index, bool new_i_size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page) {
                /*
                 * before exiting, we should make sure ipage will be released
                 * if any error occurs.
                 */
                f2fs_put_page(ipage, 1);
                return ERR_PTR(-ENOMEM);
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, index);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        if (!ipage)
                f2fs_put_dnode(&dn);

        if (PageUptodate(page))
                goto got_it;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);

                /* if ipage exists, blkaddr should be NEW_ADDR */
                f2fs_bug_on(F2FS_I_SB(inode), ipage);
                page = f2fs_get_lock_data_page(inode, index, true);
                if (IS_ERR(page))
                        return page;
        }
got_it:
        if (new_i_size && i_size_read(inode) <
                                ((loff_t)(index + 1) << PAGE_SHIFT))
                f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
        return page;
}

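/*
 * Allocate a new data block for dn (out of place, via the segment
 * allocator) and update the dnode and extent cache with the new address.
 */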
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        block_t old_blkaddr;
        blkcnt_t count = 1;
        int err;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;

        err = f2fs_get_node_info(sbi, dn->nid, &ni);
        if (err)
                return err;

        dn->data_blkaddr = f2fs_data_blkaddr(dn);
        if (dn->data_blkaddr != NULL_ADDR)
                goto alloc;

        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

alloc:
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
        old_blkaddr = dn->data_blkaddr;
        f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
                                        &sum, seg_type, NULL, false);
        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
                invalidate_mapping_pages(META_MAPPING(sbi),
                                        old_blkaddr, old_blkaddr);
        f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

        /*
         * i_size will be updated by direct_IO. Otherwise, we'll get stale
         * data from an unwritten block via dio_read.
         */
        return 0;
}

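/*
 * Preallocate blocks for a buffered or direct write in one f2fs_map_blocks
 * call; -ENOSPC is swallowed so the write path can fall back to the
 * non-preallocated path.
 */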
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        struct f2fs_map_blocks map;
        int flag;
        int err = 0;
        bool direct_io = iocb->ki_flags & IOCB_DIRECT;

        map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
        map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
        if (map.m_len > map.m_lblk)
                map.m_len -= map.m_lblk;
        else
                map.m_len = 0;

        map.m_next_pgofs = NULL;
        map.m_next_extent = NULL;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = true;

        if (direct_io) {
                map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
                flag = f2fs_force_buffered_io(inode, iocb, from) ?
                                        F2FS_GET_BLOCK_PRE_AIO :
                                        F2FS_GET_BLOCK_PRE_DIO;
                goto map_blocks;
        }
        if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }
        if (f2fs_has_inline_data(inode))
                return err;

        flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
        err = f2fs_map_blocks(inode, &map, 1, flag);
        if (map.m_len > 0 && err == -ENOSPC) {
                if (!direct_io)
                        set_inode_flag(inode, FI_NO_PREALLOC);
                err = 0;
        }
        return err;
}

void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
                if (lock)
                        down_read(&sbi->node_change);
                else
                        up_read(&sbi->node_change);
        } else {
                if (lock)
                        f2fs_lock_op(sbi);
                else
                        f2fs_unlock_op(sbi);
        }
}

/*
 * f2fs_map_blocks() tries to find or build a mapping relationship which
 * maps contiguous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                                                int create, int flag)
{
        unsigned int maxblocks = map->m_len;
        struct dnode_of_data dn;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
        pgoff_t pgofs, end_offset, end;
        int err = 0, ofs = 1;
        unsigned int ofs_in_node, last_ofs_in_node;
        blkcnt_t prealloc;
        struct extent_info ei = {0, 0, 0};
        block_t blkaddr;
        unsigned int start_pgofs;

        if (!maxblocks)
                return 0;

        map->m_len = 0;
        map->m_flags = 0;

        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
        end = pgofs + maxblocks;

        if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
                                                        map->m_may_create)
                        goto next_dnode;

                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                if (map->m_next_extent)
                        *map->m_next_extent = pgofs + map->m_len;

                /* for hardware encryption, to avoid a potential issue in the future */
                if (flag == F2FS_GET_BLOCK_DIO)
                        f2fs_wait_on_block_writeback_range(inode,
                                                map->m_pblk, map->m_len);
                goto out;
        }

next_dnode:
        if (map->m_may_create)
                __do_map_lock(sbi, flag, true);

        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
                if (flag == F2FS_GET_BLOCK_BMAP)
                        map->m_pblk = 0;
                if (err == -ENOENT) {
                        err = 0;
                        if (map->m_next_pgofs)
                                *map->m_next_pgofs =
                                        f2fs_get_next_page_offset(&dn, pgofs);
                        if (map->m_next_extent)
                                *map->m_next_extent =
                                        f2fs_get_next_page_offset(&dn, pgofs);
                }
                goto unlock_out;
        }

        start_pgofs = pgofs;
        prealloc = 0;
        last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
        blkaddr = f2fs_data_blkaddr(&dn);

        if (__is_valid_data_blkaddr(blkaddr) &&
                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
                err = -EFSCORRUPTED;
                goto sync_out;
        }

        if (__is_valid_data_blkaddr(blkaddr)) {
                /* use out-of-place update for direct IO under LFS mode */
                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
                                                        map->m_may_create) {
                        err = __allocate_data_block(&dn, map->m_seg_type);
                        if (err)
                                goto sync_out;
                        blkaddr = dn.data_blkaddr;
                        set_inode_flag(inode, FI_APPEND_WRITE);
                }
        } else {
                if (create) {
                        if (unlikely(f2fs_cp_error(sbi))) {
                                err = -EIO;
                                goto sync_out;
                        }
                        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
                                if (blkaddr == NULL_ADDR) {
                                        prealloc++;
                                        last_ofs_in_node = dn.ofs_in_node;
                                }
                        } else {
                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
                                        flag != F2FS_GET_BLOCK_DIO);
                                err = __allocate_data_block(&dn,
                                                        map->m_seg_type);
                                if (!err)
                                        set_inode_flag(inode, FI_APPEND_WRITE);
                        }
                        if (err)
                                goto sync_out;
                        map->m_flags |= F2FS_MAP_NEW;
                        blkaddr = dn.data_blkaddr;
                } else {
                        if (flag == F2FS_GET_BLOCK_BMAP) {
                                map->m_pblk = 0;
                                goto sync_out;
                        }
                        if (flag == F2FS_GET_BLOCK_PRECACHE)
                                goto sync_out;
                        if (flag == F2FS_GET_BLOCK_FIEMAP &&
                                                blkaddr == NULL_ADDR) {
                                if (map->m_next_pgofs)
                                        *map->m_next_pgofs = pgofs + 1;
                                goto sync_out;
                        }
                        if (flag != F2FS_GET_BLOCK_FIEMAP) {
                                /* for defragment case */
                                if (map->m_next_pgofs)
                                        *map->m_next_pgofs = pgofs + 1;
                                goto sync_out;
                        }
                }
        }

        if (flag == F2FS_GET_BLOCK_PRE_AIO)
                goto skip;

        if (map->m_len == 0) {
                /* preallocated unwritten block should be mapped for fiemap. */
                if (blkaddr == NEW_ADDR)
                        map->m_flags |= F2FS_MAP_UNWRITTEN;
                map->m_flags |= F2FS_MAP_MAPPED;

                map->m_pblk = blkaddr;
                map->m_len = 1;
        } else if ((map->m_pblk != NEW_ADDR &&
                        blkaddr == (map->m_pblk + ofs)) ||
                        (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
                        flag == F2FS_GET_BLOCK_PRE_DIO) {
                ofs++;
                map->m_len++;
        } else {
                goto sync_out;
        }

skip:
        dn.ofs_in_node++;
        pgofs++;

        /* preallocate blocks in batch for one dnode page */
        if (flag == F2FS_GET_BLOCK_PRE_AIO &&
                        (pgofs == end || dn.ofs_in_node == end_offset)) {

                dn.ofs_in_node = ofs_in_node;
                err = f2fs_reserve_new_blocks(&dn, prealloc);
                if (err)
1615                        goto sync_out;
1616
1617                map->m_len += dn.ofs_in_node - ofs_in_node;
1618                if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1619                        err = -ENOSPC;
1620                        goto sync_out;
1621                }
1622                dn.ofs_in_node = end_offset;
1623        }
1624
1625        if (pgofs >= end)
1626                goto sync_out;
1627        else if (dn.ofs_in_node < end_offset)
1628                goto next_block;
1629
1630        if (flag == F2FS_GET_BLOCK_PRECACHE) {
1631                if (map->m_flags & F2FS_MAP_MAPPED) {
1632                        unsigned int ofs = start_pgofs - map->m_lblk;
1633
1634                        f2fs_update_extent_cache_range(&dn,
1635                                start_pgofs, map->m_pblk + ofs,
1636                                map->m_len - ofs);
1637                }
1638        }
1639
1640        f2fs_put_dnode(&dn);
1641
1642        if (map->m_may_create) {
1643                __do_map_lock(sbi, flag, false);
1644                f2fs_balance_fs(sbi, dn.node_changed);
1645        }
1646        goto next_dnode;
1647
1648sync_out:
1649
1650        /* for hardware encryption, and to avoid potential future issues */
1651        if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1652                f2fs_wait_on_block_writeback_range(inode,
1653                                                map->m_pblk, map->m_len);
1654
1655        if (flag == F2FS_GET_BLOCK_PRECACHE) {
1656                if (map->m_flags & F2FS_MAP_MAPPED) {
1657                        unsigned int ofs = start_pgofs - map->m_lblk;
1658
1659                        f2fs_update_extent_cache_range(&dn,
1660                                start_pgofs, map->m_pblk + ofs,
1661                                map->m_len - ofs);
1662                }
1663                if (map->m_next_extent)
1664                        *map->m_next_extent = pgofs + 1;
1665        }
1666        f2fs_put_dnode(&dn);
1667unlock_out:
1668        if (map->m_may_create) {
1669                __do_map_lock(sbi, flag, false);
1670                f2fs_balance_fs(sbi, dn.node_changed);
1671        }
1672out:
1673        trace_f2fs_map_blocks(inode, map, err);
1674        return err;
1675}
1676
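    /*
     * Illustrative sketch (editorial addition, not part of the original
     * file): a minimal read-only lookup of the on-disk address backing one
     * logical block, mirroring how f2fs_overwrite_io() below drives
     * f2fs_map_blocks(). The function name is hypothetical.
     */
    static int __maybe_unused f2fs_example_lookup_block(struct inode *inode,
                                            pgoff_t lblk, block_t *pblk)
    {
            struct f2fs_map_blocks map;
            int err;

            map.m_lblk = lblk;
            map.m_len = 1;
            map.m_next_pgofs = NULL;
            map.m_next_extent = NULL;
            map.m_seg_type = NO_CHECK_TYPE;
            map.m_may_create = false;

            /* read-only lookup: create == 0 with the default flag */
            err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
            if (!err && (map.m_flags & F2FS_MAP_MAPPED))
                    *pblk = map.m_pblk;
            return err;
    }
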
1677bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1678{
1679        struct f2fs_map_blocks map;
1680        block_t last_lblk;
1681        int err;
1682
1683        if (pos + len > i_size_read(inode))
1684                return false;
1685
1686        map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1687        map.m_next_pgofs = NULL;
1688        map.m_next_extent = NULL;
1689        map.m_seg_type = NO_CHECK_TYPE;
1690        map.m_may_create = false;
1691        last_lblk = F2FS_BLK_ALIGN(pos + len);
1692
1693        while (map.m_lblk < last_lblk) {
1694                map.m_len = last_lblk - map.m_lblk;
1695                err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1696                if (err || map.m_len == 0)
1697                        return false;
1698                map.m_lblk += map.m_len;
1699        }
1700        return true;
1701}
1702
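    /*
     * Usage example (editorial sketch): a direct-write fast path can use
     * f2fs_overwrite_io() to check that [pos, pos + len) is fully backed
     * by already-allocated blocks, in which case block (pre)allocation can
     * be skipped entirely:
     *
     *        if (f2fs_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
     *                return 0;        ... pure overwrite, nothing to reserve ...
     */
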
1703static int __get_data_block(struct inode *inode, sector_t iblock,
1704                        struct buffer_head *bh, int create, int flag,
1705                        pgoff_t *next_pgofs, int seg_type, bool may_write)
1706{
1707        struct f2fs_map_blocks map;
1708        int err;
1709
1710        map.m_lblk = iblock;
1711        map.m_len = bh->b_size >> inode->i_blkbits;
1712        map.m_next_pgofs = next_pgofs;
1713        map.m_next_extent = NULL;
1714        map.m_seg_type = seg_type;
1715        map.m_may_create = may_write;
1716
1717        err = f2fs_map_blocks(inode, &map, create, flag);
1718        if (!err) {
1719                map_bh(bh, inode->i_sb, map.m_pblk);
1720                bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1721                bh->b_size = (u64)map.m_len << inode->i_blkbits;
1722        }
1723        return err;
1724}
1725
1726static int get_data_block(struct inode *inode, sector_t iblock,
1727                        struct buffer_head *bh_result, int create, int flag,
1728                        pgoff_t *next_pgofs)
1729{
1730        return __get_data_block(inode, iblock, bh_result, create,
1731                                                        flag, next_pgofs,
1732                                                        NO_CHECK_TYPE, create);
1733}
1734
1735static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1736                        struct buffer_head *bh_result, int create)
1737{
1738        return __get_data_block(inode, iblock, bh_result, create,
1739                                F2FS_GET_BLOCK_DIO, NULL,
1740                                f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1741                                !IS_SWAPFILE(inode));
1742}
1743
1744static int get_data_block_dio(struct inode *inode, sector_t iblock,
1745                        struct buffer_head *bh_result, int create)
1746{
1747        return __get_data_block(inode, iblock, bh_result, create,
1748                                F2FS_GET_BLOCK_DIO, NULL,
1749                                f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1750                                false);
1751}
1752
1753static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1754                        struct buffer_head *bh_result, int create)
1755{
1756        /* Block number must be below the F2FS max file block count */
1757        if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1758                return -EFBIG;
1759
1760        return __get_data_block(inode, iblock, bh_result, create,
1761                                                F2FS_GET_BLOCK_BMAP, NULL,
1762                                                NO_CHECK_TYPE, create);
1763}
1764
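    /*
     * Editorial sketch of the buffer_head in/out convention shared by the
     * wrappers above: the caller encodes the requested length in
     * bh->b_size; on success, __get_data_block() rewrites b_blocknr (via
     * map_bh()), b_state and b_size to describe the mapped extent:
     *
     *        struct buffer_head bh = { .b_size = (u64)4 << inode->i_blkbits };
     *
     *        if (!get_data_block_bmap(inode, iblock, &bh, 0) &&
     *                        buffer_mapped(&bh))
     *                first_pblk = bh.b_blocknr;
     */
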
1765static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1766{
1767        return (offset >> inode->i_blkbits);
1768}
1769
1770static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1771{
1772        return (blk << inode->i_blkbits);
1773}
1774
1775static int f2fs_xattr_fiemap(struct inode *inode,
1776                                struct fiemap_extent_info *fieinfo)
1777{
1778        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1779        struct page *page;
1780        struct node_info ni;
1781        __u64 phys = 0, len;
1782        __u32 flags;
1783        nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1784        int err = 0;
1785
1786        if (f2fs_has_inline_xattr(inode)) {
1787                int offset;
1788
1789                page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1790                                                inode->i_ino, false);
1791                if (!page)
1792                        return -ENOMEM;
1793
1794                err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1795                if (err) {
1796                        f2fs_put_page(page, 1);
1797                        return err;
1798                }
1799
1800                phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1801                offset = offsetof(struct f2fs_inode, i_addr) +
1802                                        sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1803                                        get_inline_xattr_addrs(inode));
1804
1805                phys += offset;
1806                len = inline_xattr_size(inode);
1807
1808                f2fs_put_page(page, 1);
1809
1810                flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1811
1812                if (!xnid)
1813                        flags |= FIEMAP_EXTENT_LAST;
1814
1815                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1816                if (err)        /* err == 1: no more extents can be added */
1817                        return err;
1818        }
1819
1820        if (xnid) {
1821                page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1822                if (!page)
1823                        return -ENOMEM;
1824
1825                err = f2fs_get_node_info(sbi, xnid, &ni);
1826                if (err) {
1827                        f2fs_put_page(page, 1);
1828                        return err;
1829                }
1830
1831                phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1832                len = inode->i_sb->s_blocksize;
1833
1834                f2fs_put_page(page, 1);
1835
1836                flags = FIEMAP_EXTENT_LAST;
1837        }
1838
1839        if (phys)
1840                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1841
1842        return (err < 0 ? err : 0);
1843}
1844
1845static loff_t max_inode_blocks(struct inode *inode)
1846{
1847        loff_t result = ADDRS_PER_INODE(inode);
1848        loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1849
1850        /* two direct node blocks */
1851        result += (leaf_count * 2);
1852
1853        /* two indirect node blocks */
1854        leaf_count *= NIDS_PER_BLOCK;
1855        result += (leaf_count * 2);
1856
1857        /* one double indirect node block */
1858        leaf_count *= NIDS_PER_BLOCK;
1859        result += leaf_count;
1860
1861        return result;
1862}
1863
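    /*
     * Worked example (editorial; assumes the default 4KiB layout where
     * ADDRS_PER_INODE(inode) == 923 and ADDRS_PER_BLOCK(inode) ==
     * NIDS_PER_BLOCK == 1018, i.e. no extra attributes shrinking the
     * inode's address space):
     *
     *            923                     direct pointers in the inode
     *        + 2 * 1018                  two direct node blocks
     *        + 2 * 1018 * 1018           two indirect node blocks
     *        + 1018 * 1018 * 1018        one double indirect node block
     *        = 1,057,053,439 blocks      (~3.9 TiB of data per file)
     */
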
1864int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1865                u64 start, u64 len)
1866{
1867        struct buffer_head map_bh;
1868        sector_t start_blk, last_blk;
1869        pgoff_t next_pgofs;
1870        u64 logical = 0, phys = 0, size = 0;
1871        u32 flags = 0;
1872        int ret = 0;
1873        bool compr_cluster = false;
1874        unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1875
1876        if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1877                ret = f2fs_precache_extents(inode);
1878                if (ret)
1879                        return ret;
1880        }
1881
1882        ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1883        if (ret)
1884                return ret;
1885
1886        inode_lock(inode);
1887
1888        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1889                ret = f2fs_xattr_fiemap(inode, fieinfo);
1890                goto out;
1891        }
1892
1893        if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1894                ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1895                if (ret != -EAGAIN)
1896                        goto out;
1897        }
1898
1899        if (logical_to_blk(inode, len) == 0)
1900                len = blk_to_logical(inode, 1);
1901
1902        start_blk = logical_to_blk(inode, start);
1903        last_blk = logical_to_blk(inode, start + len - 1);
1904
1905next:
1906        memset(&map_bh, 0, sizeof(struct buffer_head));
1907        map_bh.b_size = len;
1908
1909        if (compr_cluster)
1910                map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
1911
1912        ret = get_data_block(inode, start_blk, &map_bh, 0,
1913                                        F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1914        if (ret)
1915                goto out;
1916
1917        /* HOLE */
1918        if (!buffer_mapped(&map_bh)) {
1919                start_blk = next_pgofs;
1920
1921                if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1922                                                max_inode_blocks(inode)))
1923                        goto prep_next;
1924
1925                flags |= FIEMAP_EXTENT_LAST;
1926        }
1927
1928        if (size) {
1929                if (IS_ENCRYPTED(inode))
1930                        flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1931
1932                ret = fiemap_fill_next_extent(fieinfo, logical,
1933                                phys, size, flags);
1934                if (ret)
1935                        goto out;
1936                size = 0;
1937        }
1938
1939        if (start_blk > last_blk)
1940                goto out;
1941
1942        if (compr_cluster) {
1943                compr_cluster = false;
1944
1945
1946                logical = blk_to_logical(inode, start_blk - 1);
1947                phys = blk_to_logical(inode, map_bh.b_blocknr);
1948                size = blk_to_logical(inode, cluster_size);
1949
1950                flags |= FIEMAP_EXTENT_ENCODED;
1951
1952                start_blk += cluster_size - 1;
1953
1954                if (start_blk > last_blk)
1955                        goto out;
1956
1957                goto prep_next;
1958        }
1959
1960        if (map_bh.b_blocknr == COMPRESS_ADDR) {
1961                compr_cluster = true;
1962                start_blk++;
1963                goto prep_next;
1964        }
1965
1966        logical = blk_to_logical(inode, start_blk);
1967        phys = blk_to_logical(inode, map_bh.b_blocknr);
1968        size = map_bh.b_size;
1969        flags = 0;
1970        if (buffer_unwritten(&map_bh))
1971                flags = FIEMAP_EXTENT_UNWRITTEN;
1972
1973        start_blk += logical_to_blk(inode, size);
1974
1975prep_next:
1976        cond_resched();
1977        if (fatal_signal_pending(current))
1978                ret = -EINTR;
1979        else
1980                goto next;
1981out:
1982        if (ret == 1)
1983                ret = 0;
1984
1985        inode_unlock(inode);
1986        return ret;
1987}
1988
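    /*
     * Editorial note: f2fs_fiemap() above backs the generic FIEMAP ioctl.
     * From userspace, a caller fills struct fiemap (fm_start, fm_length,
     * fm_flags, fm_extent_count) and issues ioctl(fd, FS_IOC_FIEMAP, &fm);
     * setting FIEMAP_FLAG_XATTR in fm_flags routes the request to
     * f2fs_xattr_fiemap().
     */
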
1989static inline loff_t f2fs_readpage_limit(struct inode *inode)
1990{
1991        if (IS_ENABLED(CONFIG_FS_VERITY) &&
1992            (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
1993                return inode->i_sb->s_maxbytes;
1994
1995        return i_size_read(inode);
1996}
1997
1998static int f2fs_read_single_page(struct inode *inode, struct page *page,
1999                                        unsigned nr_pages,
2000                                        struct f2fs_map_blocks *map,
2001                                        struct bio **bio_ret,
2002                                        sector_t *last_block_in_bio,
2003                                        bool is_readahead)
2004{
2005        struct bio *bio = *bio_ret;
2006        const unsigned blkbits = inode->i_blkbits;
2007        const unsigned blocksize = 1 << blkbits;
2008        sector_t block_in_file;
2009        sector_t last_block;
2010        sector_t last_block_in_file;
2011        sector_t block_nr;
2012        int ret = 0;
2013
2014        block_in_file = (sector_t)page_index(page);
2015        last_block = block_in_file + nr_pages;
2016        last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
2017                                                        blkbits;
2018        if (last_block > last_block_in_file)
2019                last_block = last_block_in_file;
2020
2021        /* just zero out the page, which is beyond EOF */
2022        if (block_in_file >= last_block)
2023                goto zero_out;
2024        /*
2025         * Map blocks using the previous result first.
2026         */
2027        if ((map->m_flags & F2FS_MAP_MAPPED) &&
2028                        block_in_file > map->m_lblk &&
2029                        block_in_file < (map->m_lblk + map->m_len))
2030                goto got_it;
2031
2032        /*
2033         * Then do more f2fs_map_blocks() calls until we are
2034         * done with this page.
2035         */
2036        map->m_lblk = block_in_file;
2037        map->m_len = last_block - block_in_file;
2038
2039        ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2040        if (ret)
2041                goto out;
2042got_it:
2043        if (map->m_flags & F2FS_MAP_MAPPED) {
2044                block_nr = map->m_pblk + block_in_file - map->m_lblk;
2045                SetPageMappedToDisk(page);
2046
2047                if (!PageUptodate(page) && (!PageSwapCache(page) &&
2048                                        !cleancache_get_page(page))) {
2049                        SetPageUptodate(page);
2050                        goto confused;
2051                }
2052
2053                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2054                                                DATA_GENERIC_ENHANCE_READ)) {
2055                        ret = -EFSCORRUPTED;
2056                        goto out;
2057                }
2058        } else {
2059zero_out:
2060                zero_user_segment(page, 0, PAGE_SIZE);
2061                if (f2fs_need_verity(inode, page->index) &&
2062                    !fsverity_verify_page(page)) {
2063                        ret = -EIO;
2064                        goto out;
2065                }
2066                if (!PageUptodate(page))
2067                        SetPageUptodate(page);
2068                unlock_page(page);
2069                goto out;
2070        }
2071
2072        /*
2073         * This page will go to BIO.  Do we need to send this
2074         * BIO off first?
2075         */
2076        if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio,
2077                                *last_block_in_bio, block_nr)) {
2078submit_and_realloc:
2079                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2080                bio = NULL;
2081        }
2082        if (!bio) {
2083                bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2084                                is_readahead ? REQ_RAHEAD : 0, page->index,
2085                                false);
2086                if (IS_ERR(bio)) {
2087                        ret = PTR_ERR(bio);
2088                        bio = NULL;
2089                        goto out;
2090                }
2091        }
2092
2093        /*
2094         * If the page is under writeback, we need to wait for
2095         * its completion to see the correct decrypted data.
2096         */
2097        f2fs_wait_on_block_writeback(inode, block_nr);
2098
2099        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2100                goto submit_and_realloc;
2101
2102        inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2103        f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2104        ClearPageError(page);
2105        *last_block_in_bio = block_nr;
2106        goto out;
2107confused:
2108        if (bio) {
2109                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2110                bio = NULL;
2111        }
2112        unlock_page(page);
2113out:
2114        *bio_ret = bio;
2115        return ret;
2116}
2117
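    /*
     * Editorial sketch of the bio batching pattern implemented above
     * (pseudo-code; the helper names are shorthand, not real functions):
     * physically contiguous blocks are accumulated into one bio, which is
     * flushed whenever the next block cannot be merged or the bio is full:
     *
     *        if (bio && !mergeable(bio, last_blk, blk))
     *                submit(bio), bio = NULL;
     *        if (!bio)
     *                bio = alloc_read_bio(...);
     *        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
     *                goto submit_and_realloc;
     */
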
2118#ifdef CONFIG_F2FS_FS_COMPRESSION
2119int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2120                                unsigned nr_pages, sector_t *last_block_in_bio,
2121                                bool is_readahead, bool for_write)
2122{
2123        struct dnode_of_data dn;
2124        struct inode *inode = cc->inode;
2125        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2126        struct bio *bio = *bio_ret;
2127        unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2128        sector_t last_block_in_file;
2129        const unsigned blkbits = inode->i_blkbits;
2130        const unsigned blocksize = 1 << blkbits;
2131        struct decompress_io_ctx *dic = NULL;
2132        int i;
2133        int ret = 0;
2134
2135        f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2136
2137        last_block_in_file = (f2fs_readpage_limit(inode) +
2138                                        blocksize - 1) >> blkbits;
2139
2140        /* get rid of pages beyond EOF */
2141        for (i = 0; i < cc->cluster_size; i++) {
2142                struct page *page = cc->rpages[i];
2143
2144                if (!page)
2145                        continue;
2146                if ((sector_t)page->index >= last_block_in_file) {
2147                        zero_user_segment(page, 0, PAGE_SIZE);
2148                        if (!PageUptodate(page))
2149                                SetPageUptodate(page);
2150                } else if (!PageUptodate(page)) {
2151                        continue;
2152                }
2153                unlock_page(page);
2154                cc->rpages[i] = NULL;
2155                cc->nr_rpages--;
2156        }
2157
2158        /* we are done since all pages are beyond EOF */
2159        if (f2fs_cluster_is_empty(cc))
2160                goto out;
2161
2162        set_new_dnode(&dn, inode, NULL, NULL, 0);
2163        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2164        if (ret)
2165                goto out;
2166
2167        /* cluster was overwritten as normal cluster */
2168        if (dn.data_blkaddr != COMPRESS_ADDR)
2169                goto out;
2170
2171        for (i = 1; i < cc->cluster_size; i++) {
2172                block_t blkaddr;
2173
2174                blkaddr = data_blkaddr(dn.inode, dn.node_page,
2175                                                dn.ofs_in_node + i);
2176
2177                if (!__is_valid_data_blkaddr(blkaddr))
2178                        break;
2179
2180                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2181                        ret = -EFAULT;
2182                        goto out_put_dnode;
2183                }
2184                cc->nr_cpages++;
2185        }
2186
2187        /* nothing to decompress */
2188        if (cc->nr_cpages == 0) {
2189                ret = 0;
2190                goto out_put_dnode;
2191        }
2192
2193        dic = f2fs_alloc_dic(cc);
2194        if (IS_ERR(dic)) {
2195                ret = PTR_ERR(dic);
2196                goto out_put_dnode;
2197        }
2198
2199        for (i = 0; i < dic->nr_cpages; i++) {
2200                struct page *page = dic->cpages[i];
2201                block_t blkaddr;
2202                struct bio_post_read_ctx *ctx;
2203
2204                blkaddr = data_blkaddr(dn.inode, dn.node_page,
2205                                                dn.ofs_in_node + i + 1);
2206
2207                if (bio && !page_is_mergeable(sbi, bio,
2208                                        *last_block_in_bio, blkaddr)) {
2209submit_and_realloc:
2210                        __submit_bio(sbi, bio, DATA);
2211                        bio = NULL;
2212                }
2213
2214                if (!bio) {
2215                        bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2216                                        is_readahead ? REQ_RAHEAD : 0,
2217                                        page->index, for_write);
2218                        if (IS_ERR(bio)) {
2219                                ret = PTR_ERR(bio);
2220                                dic->failed = true;
2221                                if (refcount_sub_and_test(dic->nr_cpages - i,
2222                                                        &dic->ref)) {
2223                                        f2fs_decompress_end_io(dic->rpages,
2224                                                        cc->cluster_size, true,
2225                                                        false);
2226                                        f2fs_free_dic(dic);
2227                                }
2228                                f2fs_put_dnode(&dn);
2229                                *bio_ret = NULL;
2230                                return ret;
2231                        }
2232                }
2233
2234                f2fs_wait_on_block_writeback(inode, blkaddr);
2235
2236                if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2237                        goto submit_and_realloc;
2238
2239                /* tag STEP_DECOMPRESS to handle the IO in a workqueue */
2240                ctx = bio->bi_private;
2241                if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
2242                        ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
2243
2244                inc_page_count(sbi, F2FS_RD_DATA);
2245                f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2246                f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2247                ClearPageError(page);
2248                *last_block_in_bio = blkaddr;
2249        }
2250
2251        f2fs_put_dnode(&dn);
2252
2253        *bio_ret = bio;
2254        return 0;
2255
2256out_put_dnode:
2257        f2fs_put_dnode(&dn);
2258out:
2259        f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
2260        *bio_ret = bio;
2261        return ret;
2262}
2263#endif
2264
2265/*
2266 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2267 * The major change is that block_size == page_size in f2fs by default.
2268 *
2269 * Note that the aops->readpages() function is ONLY used for read-ahead. If
2270 * this function ever deviates from doing just read-ahead, it should either
2271 * use ->readpage() or do the necessary surgery to decouple ->readpages()
2272 * from read-ahead.
2273 */
2274static int f2fs_mpage_readpages(struct inode *inode,
2275                struct readahead_control *rac, struct page *page)
2276{
2277        struct bio *bio = NULL;
2278        sector_t last_block_in_bio = 0;
2279        struct f2fs_map_blocks map;
2280#ifdef CONFIG_F2FS_FS_COMPRESSION
2281        struct compress_ctx cc = {
2282                .inode = inode,
2283                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2284                .cluster_size = F2FS_I(inode)->i_cluster_size,
2285                .cluster_idx = NULL_CLUSTER,
2286                .rpages = NULL,
2287                .cpages = NULL,
2288                .nr_rpages = 0,
2289                .nr_cpages = 0,
2290        };
2291#endif
2292        unsigned nr_pages = rac ? readahead_count(rac) : 1;
2293        unsigned max_nr_pages = nr_pages;
2294        int ret = 0;
2295
2296        map.m_pblk = 0;
2297        map.m_lblk = 0;
2298        map.m_len = 0;
2299        map.m_flags = 0;
2300        map.m_next_pgofs = NULL;
2301        map.m_next_extent = NULL;
2302        map.m_seg_type = NO_CHECK_TYPE;
2303        map.m_may_create = false;
2304
2305        for (; nr_pages; nr_pages--) {
2306                if (rac) {
2307                        page = readahead_page(rac);
2308                        prefetchw(&page->flags);
2309                }
2310
2311#ifdef CONFIG_F2FS_FS_COMPRESSION
2312                if (f2fs_compressed_file(inode)) {
2313                        /* there are remaining compressed pages, submit them */
2314                        if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2315                                ret = f2fs_read_multi_pages(&cc, &bio,
2316                                                        max_nr_pages,
2317                                                        &last_block_in_bio,
2318                                                        rac != NULL, false);
2319                                f2fs_destroy_compress_ctx(&cc);
2320                                if (ret)
2321                                        goto set_error_page;
2322                        }
2323                        ret = f2fs_is_compressed_cluster(inode, page->index);
2324                        if (ret < 0)
2325                                goto set_error_page;
2326                        else if (!ret)
2327                                goto read_single_page;
2328
2329                        ret = f2fs_init_compress_ctx(&cc);
2330                        if (ret)
2331                                goto set_error_page;
2332
2333                        f2fs_compress_ctx_add_page(&cc, page);
2334
2335                        goto next_page;
2336                }
2337read_single_page:
2338#endif
2339
2340                ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2341                                        &bio, &last_block_in_bio, rac);
2342                if (ret) {
2343#ifdef CONFIG_F2FS_FS_COMPRESSION
2344set_error_page:
2345#endif
2346                        SetPageError(page);
2347                        zero_user_segment(page, 0, PAGE_SIZE);
2348                        unlock_page(page);
2349                }
2350#ifdef CONFIG_F2FS_FS_COMPRESSION
2351next_page:
2352#endif
2353                if (rac)
2354                        put_page(page);
2355
2356#ifdef CONFIG_F2FS_FS_COMPRESSION
2357                if (f2fs_compressed_file(inode)) {
2358                        /* last page */
2359                        if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2360                                ret = f2fs_read_multi_pages(&cc, &bio,
2361                                                        max_nr_pages,
2362                                                        &last_block_in_bio,
2363                                                        rac != NULL, false);
2364                                f2fs_destroy_compress_ctx(&cc);
2365                        }
2366                }
2367#endif
2368        }
2369        if (bio)
2370                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2371        return ret;
2372}
2373
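    /*
     * Editorial note on the loop above: for compressed inodes, pages are
     * accumulated in the compress_ctx until they cross a cluster boundary
     * (f2fs_cluster_can_merge_page() fails) or the last page is reached;
     * each completed cluster is then read as a unit through
     * f2fs_read_multi_pages(), while non-compressed clusters fall back to
     * f2fs_read_single_page().
     */
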
2374static int f2fs_read_data_page(struct file *file, struct page *page)
2375{
2376        struct inode *inode = page_file_mapping(page)->host;
2377        int ret = -EAGAIN;
2378
2379        trace_f2fs_readpage(page, DATA);
2380
2381        if (!f2fs_is_compress_backend_ready(inode)) {
2382                unlock_page(page);
2383                return -EOPNOTSUPP;
2384        }
2385
2386        /* If the file has inline data, try to read it directly */
2387        if (f2fs_has_inline_data(inode))
2388                ret = f2fs_read_inline_data(inode, page);
2389        if (ret == -EAGAIN)
2390                ret = f2fs_mpage_readpages(inode, NULL, page);
2391        return ret;
2392}
2393
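    /*
     * Editorial note: -EAGAIN serves as an internal "not inline, take the
     * normal path" signal above; it is the preset value for inodes without
     * inline data, and f2fs_read_inline_data() is expected to return it
     * when the inline data has been converted away under it, so either
     * case falls through to f2fs_mpage_readpages().
     */
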
2394static void f2fs_readahead(struct readahead_control *rac)
2395{
2396        struct inode *inode = rac->mapping->host;
2397
2398        trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2399
2400        if (!f2fs_is_compress_backend_ready(inode))
2401                return;
2402
2403        /* If the file has inline data, skip readpages */
2404        if (f2fs_has_inline_data(inode))
2405                return;
2406
2407        f2fs_mpage_readpages(inode, rac, NULL);
2408}
2409
2410int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2411{
2412        struct inode *inode = fio->page->mapping->host;
2413        struct page *mpage, *page;
2414        gfp_t gfp_flags = GFP_NOFS;
2415
2416        if (!f2fs_encrypted_file(inode))
2417                return 0;
2418
2419        page = fio->compressed_page ? fio->compressed_page : fio->page;
2420
2421        /* wait for GCed page writeback via META_MAPPING */
2422        f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2423
2424retry_encrypt:
2425        fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2426                                        PAGE_SIZE, 0, gfp_flags);
2427        if (IS_ERR(fio->encrypted_page)) {
2428                /* flush pending IOs and wait for a while in the ENOMEM case */
2429                if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2430                        f2fs_flush_merged_writes(fio->sbi);
2431                        congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2432                        gfp_flags |= __GFP_NOFAIL;
2433                        goto retry_encrypt;
2434                }
2435                return PTR_ERR(fio->encrypted_page);
2436        }
2437
2438        mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2439        if (mpage) {
2440                if (PageUptodate(mpage))
2441                        memcpy(page_address(mpage),
2442                                page_address(fio->encrypted_page), PAGE_SIZE);
2443                f2fs_put_page(mpage, 1);
2444        }
2445        return 0;
2446}
2447
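    /*
     * Editorial note on the retry_encrypt loop above: this is the common
     * must-succeed allocation pattern. The first attempt uses plain
     * GFP_NOFS; only after flushing merged writes and a congestion_wait()
     * is __GFP_NOFAIL added, so the bounce page allocation cannot fail a
     * second time.
     */
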
2448static inline bool check_inplace_update_policy(struct inode *inode,
2449                                struct f2fs_io_info *fio)
2450{
2451        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2452        unsigned int policy = SM_I(sbi)->ipu_policy;
2453
2454        if (policy & (0x1 << F2FS_IPU_FORCE))
2455                return true;
2456        if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2457                return true;
2458        if (policy & (0x1 << F2FS_IPU_UTIL) &&
2459                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
2460                return true;
2461        if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2462                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
2463                return true;
2464
2465        /*
2466         * use IPU when rewriting async (non-sync) pages
2467         */
2468        if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2469                        fio && fio->op == REQ_OP_WRITE &&
2470                        !(fio->op_flags & REQ_SYNC) &&
2471                        !IS_ENCRYPTED(inode))
2472                return true;
2473
2474        /* this is only set during fdatasync */
2475        if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2476                        is_inode_flag_set(inode, FI_NEED_IPU))
2477                return true;
2478
2479        if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2480                        !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2481                return true;
2482
2483        return false;
2484}
2485
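    /*
     * Example (editorial sketch): ipu_policy is a bitmask, normally set
     * through sysfs. Assuming the enum order in segment.h
     * (F2FS_IPU_FORCE == 0, F2FS_IPU_SSR == 1, F2FS_IPU_UTIL == 2, ...),
     * enabling IPU whenever SSR is needed or utilization is high would be:
     *
     *        echo 6 > /sys/fs/f2fs/<dev>/ipu_policy
     *
     * since (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_UTIL) == 0x6.
     */
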
2486bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2487{
2488        if (f2fs_is_pinned_file(inode))
2489                return true;
2490
2491        /* if this is a cold file, we should overwrite to avoid fragmentation */
2492        if (file_is_cold(inode))
2493                return true;
2494
2495        return check_inplace_update_policy(inode, fio);
2496}
2497
2498bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2499{
2500        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2501
2502        if (f2fs_lfs_mode(sbi))
2503                return true;
2504        if (S_ISDIR(inode->i_mode))
2505                return true;
2506        if (IS_NOQUOTA(inode))
2507                return true;
2508        if (f2fs_is_atomic_file(inode))
2509                return true;
2510        if (fio) {
2511                if (is_cold_data(fio->page))
2512                        return true;
2513                if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2514                        return true;
2515                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2516                        f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2517                        return true;
2518        }
2519        return false;
2520}
2521
2522static inline bool need_inplace_update(struct f2fs_io_info *fio)
2523{
2524        struct inode *inode = fio->page->mapping->host;
2525
2526        if (f2fs_should_update_outplace(inode, fio))
2527                return false;
2528
2529        return f2fs_should_update_inplace(inode, fio);
2530}
2531
2532int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2533{
2534        struct page *page = fio->page;
2535        struct inode *inode = page->mapping->host;
2536        struct dnode_of_data dn;
2537        struct extent_info ei = {0, 0, 0};
2538        struct node_info ni;
2539        bool ipu_force = false;
2540        int err = 0;
2541
2542        set_new_dnode(&dn, inode, NULL, NULL, 0);
2543        if (need_inplace_update(fio) &&
2544                        f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2545                fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2546
2547                if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2548                                                DATA_GENERIC_ENHANCE))
2549                        return -EFSCORRUPTED;
2550
2551                ipu_force = true;
2552                fio->need_lock = LOCK_DONE;
2553                goto got_it;
2554        }
2555
2556        /* Avoid deadlock between page->lock and f2fs_lock_op */
2557        if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2558                return -EAGAIN;
2559
2560        err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2561        if (err)
2562                goto out;
2563
2564        fio->old_blkaddr = dn.data_blkaddr;
2565
2566        /* This page is already truncated */
2567        if (fio->old_blkaddr == NULL_ADDR) {
2568                ClearPageUptodate(page);
2569                clear_cold_data(page);
2570                goto out_writepage;
2571        }
2572got_it:
2573        if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2574                !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2575                                                DATA_GENERIC_ENHANCE)) {
2576                err = -EFSCORRUPTED;
2577                goto out_writepage;
2578        }
2579        /*
2580         * If the current allocation needs SSR,
2581         * in-place writes are preferable for updated data.
2582         */
2583        if (ipu_force ||
2584                (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2585                                        need_inplace_update(fio))) {
2586                err = f2fs_encrypt_one_page(fio);
2587                if (err)
2588                        goto out_writepage;
2589
2590                set_page_writeback(page);
2591                ClearPageError(page);
2592                f2fs_put_dnode(&dn);
2593                if (fio->need_lock == LOCK_REQ)
2594                        f2fs_unlock_op(fio->sbi);
2595                err = f2fs_inplace_write_data(fio);
2596                if (err) {
2597                        if (f2fs_encrypted_file(inode))
2598                                fscrypt_finalize_bounce_page(&fio->encrypted_page);
2599                        if (PageWriteback(page))
2600                                end_page_writeback(page);
2601                } else {
2602                        set_inode_flag(inode, FI_UPDATE_WRITE);
2603                }
2604                trace_f2fs_do_write_data_page(fio->page, IPU);
2605                return err;
2606        }
2607
2608        if (fio->need_lock == LOCK_RETRY) {
2609                if (!f2fs_trylock_op(fio->sbi)) {
2610                        err = -EAGAIN;
2611                        goto out_writepage;
2612                }
2613                fio->need_lock = LOCK_REQ;
2614        }
2615
2616        err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2617        if (err)
2618                goto out_writepage;
2619
2620        fio->version = ni.version;
2621
2622        err = f2fs_encrypt_one_page(fio);
2623        if (err)
2624                goto out_writepage;
2625
2626        set_page_writeback(page);
2627        ClearPageError(page);
2628
2629        if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2630                f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2631
2632        /* LFS mode write path */
2633        f2fs_outplace_write_data(&dn, fio);
2634        trace_f2fs_do_write_data_page(page, OPU);
2635        set_inode_flag(inode, FI_APPEND_WRITE);
2636        if (page->index == 0)
2637                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2638out_writepage:
2639        f2fs_put_dnode(&dn);
2640out:
2641        if (fio->need_lock == LOCK_REQ)
2642                f2fs_unlock_op(fio->sbi);
2643        return err;
2644}
2645
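    /*
     * Editorial summary of the two paths in f2fs_do_write_data_page():
     *
     *        IPU (in-place update):  rewrite the block at fio->old_blkaddr
     *                                without touching the dnode; the inode
     *                                is flagged FI_UPDATE_WRITE.
     *        OPU (out-of-place):     allocate a new block, write there, and
     *                                update the dnode; the inode is flagged
     *                                FI_APPEND_WRITE (the LFS-style path).
     */
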
2646int f2fs_write_single_data_page(struct page *page, int *submitted,
2647                                struct bio **bio,
2648                                sector_t *last_block,
2649                                struct writeback_control *wbc,
2650                                enum iostat_type io_type,
2651                                int compr_blocks)
2652{
2653        struct inode *inode = page->mapping->host;
2654        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2655        loff_t i_size = i_size_read(inode);
2656        const pgoff_t end_index = ((unsigned long long)i_size)
2657                                                        >> PAGE_SHIFT;
2658        loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2659        unsigned offset = 0;
2660        bool need_balance_fs = false;
2661        int err = 0;
2662        struct f2fs_io_info fio = {
2663                .sbi = sbi,
2664                .ino = inode->i_ino,
2665                .type = DATA,
2666                .op = REQ_OP_WRITE,
2667                .op_flags = wbc_to_write_flags(wbc),
2668                .old_blkaddr = NULL_ADDR,
2669                .page = page,
2670                .encrypted_page = NULL,
2671                .submitted = false,
2672                .compr_blocks = compr_blocks,
2673                .need_lock = LOCK_RETRY,
2674                .io_type = io_type,
2675                .io_wbc = wbc,
2676                .bio = bio,
2677                .last_block = last_block,
2678        };
2679
2680        trace_f2fs_writepage(page, DATA);
2681
2682        /* we should bypass data pages to let the kworker jobs proceed */
2683        if (unlikely(f2fs_cp_error(sbi))) {
2684                mapping_set_error(page->mapping, -EIO);
2685                /*
2686                 * don't drop any dirty dentry pages, to keep the latest
2687                 * directory structure.
2688                 */
2689                if (S_ISDIR(inode->i_mode))
2690                        goto redirty_out;
2691                goto out;
2692        }
2693
2694        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2695                goto redirty_out;
2696
2697        if (page->index < end_index ||
2698                        f2fs_verity_in_progress(inode) ||
2699                        compr_blocks)
2700                goto write;
2701
2702        /*
2703         * If the offset is beyond the file size,
2704         * this page does not have to be written to disk.
2705         */
2706        offset = i_size & (PAGE_SIZE - 1);
2707        if ((page->index >= end_index + 1) || !offset)
2708                goto out;
2709
2710        zero_user_segment(page, offset, PAGE_SIZE);
2711write:
2712        if (f2fs_is_drop_cache(inode))
2713                goto out;
2714        /* we should not write the 0'th page, which holds the journal header */
2715        if (f2fs_is_volatile_file(inode) && (!page->index ||
2716                        (!wbc->for_reclaim &&
2717                        f2fs_available_free_memory(sbi, BASE_CHECK))))
2718                goto redirty_out;
2719
2720        /* Dentry/quota blocks are controlled by checkpoint */
2721        if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2722                fio.need_lock = LOCK_DONE;
2723                err = f2fs_do_write_data_page(&fio);
2724                goto done;
2725        }
2726
2727        if (!wbc->for_reclaim)
2728                need_balance_fs = true;
2729        else if (has_not_enough_free_secs(sbi, 0, 0))
2730                goto redirty_out;
2731        else
2732                set_inode_flag(inode, FI_HOT_DATA);
2733
2734        err = -EAGAIN;
2735        if (f2fs_has_inline_data(inode)) {
2736                err = f2fs_write_inline_data(inode, page);
2737                if (!err)
2738                        goto out;
2739        }
2740
2741        if (err == -EAGAIN) {
2742                err = f2fs_do_write_data_page(&fio);
2743                if (err == -EAGAIN) {
2744                        fio.need_lock = LOCK_REQ;
2745                        err = f2fs_do_write_data_page(&fio);
2746                }
2747        }
2748
2749        if (err) {
2750                file_set_keep_isize(inode);
2751        } else {
2752                spin_lock(&F2FS_I(inode)->i_size_lock);
2753                if (F2FS_I(inode)->last_disk_size < psize)
2754                        F2FS_I(inode)->last_disk_size = psize;
2755                spin_unlock(&F2FS_I(inode)->i_size_lock);
2756        }
2757
2758done:
2759        if (err && err != -ENOENT)
2760                goto redirty_out;
2761
2762out:
2763        inode_dec_dirty_pages(inode);
2764        if (err) {
2765                ClearPageUptodate(page);
2766                clear_cold_data(page);
2767        }
2768
2769        if (wbc->for_reclaim) {
2770                f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2771                clear_inode_flag(inode, FI_HOT_DATA);
2772                f2fs_remove_dirty_inode(inode);
2773                submitted = NULL;
2774        }
2775        unlock_page(page);
2776        if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2777                                        !F2FS_I(inode)->cp_task)
2778                f2fs_balance_fs(sbi, need_balance_fs);
2779
2780        if (unlikely(f2fs_cp_error(sbi))) {
2781                f2fs_submit_merged_write(sbi, DATA);
2782                f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2783                submitted = NULL;
2784        }
2785
2786        if (submitted)
2787                *submitted = fio.submitted ? 1 : 0;
2788
2789        return 0;
2790
2791redirty_out:
2792        redirty_page_for_writepage(wbc, page);
2793        /*
2794         * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2795         * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2796         * file_write_and_wait_range() will see the EIO error, which is critical
2797         * for the return value of an fsync() that follows an atomic_write failure.
2798         */
2799        if (!err || wbc->for_reclaim)
2800                return AOP_WRITEPAGE_ACTIVATE;
2801        unlock_page(page);
2802        return err;
2803}
2804
2805static int f2fs_write_data_page(struct page *page,
2806                                        struct writeback_control *wbc)
2807{
2808#ifdef CONFIG_F2FS_FS_COMPRESSION
2809        struct inode *inode = page->mapping->host;
2810
2811        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2812                goto out;
2813
2814        if (f2fs_compressed_file(inode)) {
2815                if (f2fs_is_compressed_cluster(inode, page->index)) {
2816                        redirty_page_for_writepage(wbc, page);
2817                        return AOP_WRITEPAGE_ACTIVATE;
2818                }
2819        }
2820out:
2821#endif
2822
2823        return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2824                                                wbc, FS_DATA_IO, 0);
2825}
2826
2827/*
2828 * This function was copied from write_cache_pages in mm/page-writeback.c.
2829 * The major change is that the write step for cold data pages is separated
2830 * from that for warm/hot data pages.
2831 */
2832static int f2fs_write_cache_pages(struct address_space *mapping,
2833                                        struct writeback_control *wbc,
2834                                        enum iostat_type io_type)
2835{
2836        int ret = 0;
2837        int done = 0, retry = 0;
2838        struct pagevec pvec;
2839        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2840        struct bio *bio = NULL;
2841        sector_t last_block;
2842#ifdef CONFIG_F2FS_FS_COMPRESSION
2843        struct inode *inode = mapping->host;
2844        struct compress_ctx cc = {
2845                .inode = inode,
2846                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2847                .cluster_size = F2FS_I(inode)->i_cluster_size,
2848                .cluster_idx = NULL_CLUSTER,
2849                .rpages = NULL,
2850                .nr_rpages = 0,
2851                .cpages = NULL,
2852                .rbuf = NULL,
2853                .cbuf = NULL,
2854                .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2855                .private = NULL,
2856        };
2857#endif
2858        int nr_pages;
2859        pgoff_t uninitialized_var(writeback_index);
2860        pgoff_t index;
2861        pgoff_t end;            /* Inclusive */
2862        pgoff_t done_index;
2863        int range_whole = 0;
2864        xa_mark_t tag;
2865        int nwritten = 0;
2866        int submitted = 0;
2867        int i;
2868
2869        pagevec_init(&pvec);
2870
2871        if (get_dirty_pages(mapping->host) <=
2872                                SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2873                set_inode_flag(mapping->host, FI_HOT_DATA);
2874        else
2875                clear_inode_flag(mapping->host, FI_HOT_DATA);
2876
2877        if (wbc->range_cyclic) {
2878                writeback_index = mapping->writeback_index; /* prev offset */
2879                index = writeback_index;
2880                end = -1;
2881        } else {
2882                index = wbc->range_start >> PAGE_SHIFT;
2883                end = wbc->range_end >> PAGE_SHIFT;
2884                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2885                        range_whole = 1;
2886        }
2887        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2888                tag = PAGECACHE_TAG_TOWRITE;
2889        else
2890                tag = PAGECACHE_TAG_DIRTY;
2891retry:
2892        retry = 0;
2893        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2894                tag_pages_for_writeback(mapping, index, end);
2895        done_index = index;
2896        while (!done && !retry && (index <= end)) {
2897                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2898                                tag);
2899                if (nr_pages == 0)
2900                        break;
2901
2902                for (i = 0; i < nr_pages; i++) {
2903                        struct page *page = pvec.pages[i];
2904                        bool need_readd;
2905readd:
2906                        need_readd = false;
2907#ifdef CONFIG_F2FS_FS_COMPRESSION
2908                        if (f2fs_compressed_file(inode)) {
2909                                ret = f2fs_init_compress_ctx(&cc);
2910                                if (ret) {
2911                                        done = 1;
2912                                        break;
2913                                }
2914
2915                                if (!f2fs_cluster_can_merge_page(&cc,
2916                                                                page->index)) {
2917                                        ret = f2fs_write_multi_pages(&cc,
2918                                                &submitted, wbc, io_type);
2919                                        if (!ret)
2920                                                need_readd = true;
2921                                        goto result;
2922                                }
2923
2924                                if (unlikely(f2fs_cp_error(sbi)))
2925                                        goto lock_page;
2926
2927                                if (f2fs_cluster_is_empty(&cc)) {
2928                                        void *fsdata = NULL;
2929                                        struct page *pagep;
2930                                        int ret2;
2931
2932                                        ret2 = f2fs_prepare_compress_overwrite(
2933                                                        inode, &pagep,
2934                                                        page->index, &fsdata);
2935                                        if (ret2 < 0) {
2936                                                ret = ret2;
2937                                                done = 1;
2938                                                break;
2939                                        } else if (ret2 &&
2940                                                !f2fs_compress_write_end(inode,
2941                                                                fsdata, page->index,
2942                                                                1)) {
2943                                                retry = 1;
2944                                                break;
2945                                        }
2946                                } else {
2947                                        goto lock_page;
2948                                }
2949                        }
2950#endif
2951                        /* give priority to WB_SYNC threads */
2952                        if (atomic_read(&sbi->wb_sync_req[DATA]) &&
2953                                        wbc->sync_mode == WB_SYNC_NONE) {
2954                                done = 1;
2955                                break;
2956                        }
2957#ifdef CONFIG_F2FS_FS_COMPRESSION
2958lock_page:
2959#endif
2960                        done_index = page->index;
2961retry_write:
2962                        lock_page(page);
2963
2964                        if (unlikely(page->mapping != mapping)) {
2965continue_unlock:
2966                                unlock_page(page);
2967                                continue;
2968                        }
2969
2970                        if (!PageDirty(page)) {
2971                                /* someone wrote it for us */
2972                                goto continue_unlock;
2973                        }
2974
2975                        if (PageWriteback(page)) {
2976                                if (wbc->sync_mode != WB_SYNC_NONE)
2977                                        f2fs_wait_on_page_writeback(page,
2978                                                        DATA, true, true);
2979                                else
2980                                        goto continue_unlock;
2981                        }
2982
2983                        if (!clear_page_dirty_for_io(page))
2984                                goto continue_unlock;
2985
2986#ifdef CONFIG_F2FS_FS_COMPRESSION
2987                        if (f2fs_compressed_file(inode)) {
2988                                get_page(page);
2989                                f2fs_compress_ctx_add_page(&cc, page);
2990                                continue;
2991                        }
2992#endif
2993                        ret = f2fs_write_single_data_page(page, &submitted,
2994                                        &bio, &last_block, wbc, io_type, 0);
2995                        if (ret == AOP_WRITEPAGE_ACTIVATE)
2996                                unlock_page(page);
2997#ifdef CONFIG_F2FS_FS_COMPRESSION
2998result:
2999#endif
3000                        nwritten += submitted;
3001                        wbc->nr_to_write -= submitted;
3002
3003                        if (unlikely(ret)) {
3004                                /*
3005                                 * keep nr_to_write, since vfs uses this to
3006                                 * get # of written pages.
3007                                 */
3008                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
3009                                        ret = 0;
3010                                        goto next;
3011                                } else if (ret == -EAGAIN) {
3012                                        ret = 0;
3013                                        if (wbc->sync_mode == WB_SYNC_ALL) {
3014                                                cond_resched();
3015                                                congestion_wait(BLK_RW_ASYNC,
3016                                                        DEFAULT_IO_TIMEOUT);
3017                                                goto retry_write;
3018                                        }
3019                                        goto next;
3020                                }
3021                                done_index = page->index + 1;
3022                                done = 1;
3023                                break;
3024                        }
3025
3026                        if (wbc->nr_to_write <= 0 &&
3027                                        wbc->sync_mode == WB_SYNC_NONE) {
3028                                done = 1;
3029                                break;
3030                        }
3031next:
3032                        if (need_readd)
3033                                goto readd;
3034                }
3035                pagevec_release(&pvec);
3036                cond_resched();
3037        }
3038#ifdef CONFIG_F2FS_FS_COMPRESSION
3039        /* flush remaining pages in the compress cluster */
3040        if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3041                ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3042                nwritten += submitted;
3043                wbc->nr_to_write -= submitted;
3044                if (ret) {
3045                        done = 1;
3046                        retry = 0;
3047                }
3048        }
3049#endif
3050        if (retry) {
3051                index = 0;
3052                end = -1;
3053                goto retry;
3054        }
3055        if (wbc->range_cyclic && !done)
3056                done_index = 0;
3057        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3058                mapping->writeback_index = done_index;
3059
3060        if (nwritten)
3061                f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3062                                                                NULL, 0, DATA);
3063        /* submit the cached bio of IPU (in-place update) write */
3064        if (bio)
3065                f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3066
3067        return ret;
3068}
3069
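    /*
     * Heuristic for taking sbi->writepages around writeback: serializing
     * compressed, async and heavily-dirtied regular files keeps their
     * blocks allocated sequentially, while the checkpoint task and quota
     * files are excluded to avoid deadlocks in the data-flush path.
     */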
3070static inline bool __should_serialize_io(struct inode *inode,
3071                                        struct writeback_control *wbc)
3072{
3073        /* to avoid deadlock in path of data flush */
3074        if (F2FS_I(inode)->cp_task)
3075                return false;
3076
3077        if (!S_ISREG(inode->i_mode))
3078                return false;
3079        if (IS_NOQUOTA(inode))
3080                return false;
3081
3082        if (f2fs_compressed_file(inode))
3083                return true;
3084        if (wbc->sync_mode != WB_SYNC_ALL)
3085                return true;
3086        if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3087                return true;
3088        return false;
3089}
3090
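    /*
     * Common writepages body for data and checkpoint IO. Cheap skip
     * checks come first (no ->writepage, nothing dirty, POR or defrag in
     * progress); WB_SYNC_ALL writers then bump wb_sync_req[DATA] so that
     * concurrent WB_SYNC_NONE writeback backs off early.
     */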
3091static int __f2fs_write_data_pages(struct address_space *mapping,
3092                                                struct writeback_control *wbc,
3093                                                enum iostat_type io_type)
3094{
3095        struct inode *inode = mapping->host;
3096        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3097        struct blk_plug plug;
3098        int ret;
3099        bool locked = false;
3100
3101        /* deal with chardevs and other special files */
3102        if (!mapping->a_ops->writepage)
3103                return 0;
3104
3105        /* skip writing if there is no dirty page in this inode */
3106        if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3107                return 0;
3108
3109        /* during POR (power-on recovery), don't trigger writepage at all */
3110        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3111                goto skip_write;
3112
3113        if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3114                        wbc->sync_mode == WB_SYNC_NONE &&
3115                        get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3116                        f2fs_available_free_memory(sbi, DIRTY_DENTS))
3117                goto skip_write;
3118
3119        /* skip writing during file defragmentation */
3120        if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3121                goto skip_write;
3122
3123        trace_f2fs_writepages(mapping->host, wbc, DATA);
3124
3125        /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3126        if (wbc->sync_mode == WB_SYNC_ALL)
3127                atomic_inc(&sbi->wb_sync_req[DATA]);
3128        else if (atomic_read(&sbi->wb_sync_req[DATA]))
3129                goto skip_write;
3130
3131        if (__should_serialize_io(inode, wbc)) {
3132                mutex_lock(&sbi->writepages);
3133                locked = true;
3134        }
3135
3136        blk_start_plug(&plug);
3137        ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3138        blk_finish_plug(&plug);
3139
3140        if (locked)
3141                mutex_unlock(&sbi->writepages);
3142
3143        if (wbc->sync_mode == WB_SYNC_ALL)
3144                atomic_dec(&sbi->wb_sync_req[DATA]);
3145        /*
3146         * even if some pages were truncated, we cannot rely on the
3147         * mapping->host to detect pending bios; remove the dirty inode here.
3148         */
3149
3150        f2fs_remove_dirty_inode(inode);
3151        return ret;
3152
3153skip_write:
3154        wbc->pages_skipped += get_dirty_pages(inode);
3155        trace_f2fs_writepages(mapping->host, wbc, DATA);
3156        return 0;
3157}
3158
3159static int f2fs_write_data_pages(struct address_space *mapping,
3160                            struct writeback_control *wbc)
3161{
3162        struct inode *inode = mapping->host;
3163
3164        return __f2fs_write_data_pages(mapping, wbc,
3165                        F2FS_I(inode)->cp_task == current ?
3166                        FS_CP_DATA_IO : FS_DATA_IO);
3167}
3168
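    /*
     * A buffered write failed after blocks may already have been
     * preallocated beyond i_size: truncate the page cache and the extra
     * blocks back to the old size under i_gc_rwsem and i_mmap_sem.
     */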
3169static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3170{
3171        struct inode *inode = mapping->host;
3172        loff_t i_size = i_size_read(inode);
3173
3174        if (IS_NOQUOTA(inode))
3175                return;
3176
3177        /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3178        if (to > i_size && !f2fs_verity_in_progress(inode)) {
3179                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3180                down_write(&F2FS_I(inode)->i_mmap_sem);
3181
3182                truncate_pagecache(inode, i_size);
3183                f2fs_truncate_blocks(inode, i_size, true);
3184
3185                up_write(&F2FS_I(inode)->i_mmap_sem);
3186                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3187        }
3188}
3189
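    /*
     * Resolve the on-disk block address for the page being written and
     * report whether the node page changed (e.g. by inline-data
     * conversion); __do_map_lock() is held across lookups that may
     * allocate blocks or convert inline data.
     */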
3190static int prepare_write_begin(struct f2fs_sb_info *sbi,
3191                        struct page *page, loff_t pos, unsigned len,
3192                        block_t *blk_addr, bool *node_changed)
3193{
3194        struct inode *inode = page->mapping->host;
3195        pgoff_t index = page->index;
3196        struct dnode_of_data dn;
3197        struct page *ipage;
3198        bool locked = false;
3199        struct extent_info ei = {0, 0, 0};
3200        int err = 0;
3201        int flag;
3202
3203        /*
3204         * we already allocated all the blocks, so we don't need to get
3205         * the block addresses when there is no need to fill the page.
3206         */
3207        if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3208            !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3209            !f2fs_verity_in_progress(inode))
3210                return 0;
3211
3212        /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3213        if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3214                flag = F2FS_GET_BLOCK_DEFAULT;
3215        else
3216                flag = F2FS_GET_BLOCK_PRE_AIO;
3217
3218        if (f2fs_has_inline_data(inode) ||
3219                        (pos & PAGE_MASK) >= i_size_read(inode)) {
3220                __do_map_lock(sbi, flag, true);
3221                locked = true;
3222        }
3223
3224restart:
3225        /* check inline_data */
3226        ipage = f2fs_get_node_page(sbi, inode->i_ino);
3227        if (IS_ERR(ipage)) {
3228                err = PTR_ERR(ipage);
3229                goto unlock_out;
3230        }
3231
3232        set_new_dnode(&dn, inode, ipage, ipage, 0);
3233
3234        if (f2fs_has_inline_data(inode)) {
3235                if (pos + len <= MAX_INLINE_DATA(inode)) {
3236                        f2fs_do_read_inline_data(page, ipage);
3237                        set_inode_flag(inode, FI_DATA_EXIST);
3238                        if (inode->i_nlink)
3239                                set_inline_node(ipage);
3240                } else {
3241                        err = f2fs_convert_inline_page(&dn, page);
3242                        if (err)
3243                                goto out;
3244                        if (dn.data_blkaddr == NULL_ADDR)
3245                                err = f2fs_get_block(&dn, index);
3246                }
3247        } else if (locked) {
3248                err = f2fs_get_block(&dn, index);
3249        } else {
3250                if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3251                        dn.data_blkaddr = ei.blk + index - ei.fofs;
3252                } else {
3253                        /* hole case */
3254                        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3255                        if (err || dn.data_blkaddr == NULL_ADDR) {
3256                                f2fs_put_dnode(&dn);
3257                                __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3258                                                                true);
3259                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3260                                locked = true;
3261                                goto restart;
3262                        }
3263                }
3264        }
3265
3266        /* convert_inline_page can make node_changed */
3267        *blk_addr = dn.data_blkaddr;
3268        *node_changed = dn.node_changed;
3269out:
3270        f2fs_put_dnode(&dn);
3271unlock_out:
3272        if (locked)
3273                __do_map_lock(sbi, flag, false);
3274        return err;
3275}
3276
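    /*
     * .write_begin for buffered writes, called from
     * generic_perform_write(): grab and prepare the target page, reading
     * or zeroing it when the write does not cover the whole page.
     */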
3277static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3278                loff_t pos, unsigned len, unsigned flags,
3279                struct page **pagep, void **fsdata)
3280{
3281        struct inode *inode = mapping->host;
3282        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3283        struct page *page = NULL;
3284        pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3285        bool need_balance = false, drop_atomic = false;
3286        block_t blkaddr = NULL_ADDR;
3287        int err = 0;
3288
3289        trace_f2fs_write_begin(inode, pos, len, flags);
3290
3291        if (!f2fs_is_checkpoint_ready(sbi)) {
3292                err = -ENOSPC;
3293                goto fail;
3294        }
3295
3296        if ((f2fs_is_atomic_file(inode) &&
3297                        !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3298                        is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3299                err = -ENOMEM;
3300                drop_atomic = true;
3301                goto fail;
3302        }
3303
3304        /*
3305         * We should check this at this moment to avoid deadlock on inode page
3306         * and #0 page. The locking rule for inline_data conversion should be:
3307         * lock_page(page #0) -> lock_page(inode_page)
3308         */
3309        if (index != 0) {
3310                err = f2fs_convert_inline_inode(inode);
3311                if (err)
3312                        goto fail;
3313        }
3314
3315#ifdef CONFIG_F2FS_FS_COMPRESSION
3316        if (f2fs_compressed_file(inode)) {
3317                int ret;
3318
3319                *fsdata = NULL;
3320
3321                ret = f2fs_prepare_compress_overwrite(inode, pagep,
3322                                                        index, fsdata);
3323                if (ret < 0) {
3324                        err = ret;
3325                        goto fail;
3326                } else if (ret) {
3327                        return 0;
3328                }
3329        }
3330#endif
3331
3332repeat:
3333        /*
3334         * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3335         * wait_for_stable_page. We will do that wait below with our IO control.
3336         */
3337        page = f2fs_pagecache_get_page(mapping, index,
3338                                FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3339        if (!page) {
3340                err = -ENOMEM;
3341                goto fail;
3342        }
3343
3344        /* TODO: cluster can be compressed due to race with .writepage */
3345
3346        *pagep = page;
3347
3348        err = prepare_write_begin(sbi, page, pos, len,
3349                                        &blkaddr, &need_balance);
3350        if (err)
3351                goto fail;
3352
3353        if (need_balance && !IS_NOQUOTA(inode) &&
3354                        has_not_enough_free_secs(sbi, 0, 0)) {
3355                unlock_page(page);
3356                f2fs_balance_fs(sbi, true);
3357                lock_page(page);
3358                if (page->mapping != mapping) {
3359                        /* The page got truncated from under us */
3360                        f2fs_put_page(page, 1);
3361                        goto repeat;
3362                }
3363        }
3364
3365        f2fs_wait_on_page_writeback(page, DATA, false, true);
3366
3367        if (len == PAGE_SIZE || PageUptodate(page))
3368                return 0;
3369
3370        if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3371            !f2fs_verity_in_progress(inode)) {
3372                zero_user_segment(page, len, PAGE_SIZE);
3373                return 0;
3374        }
3375
3376        if (blkaddr == NEW_ADDR) {
3377                zero_user_segment(page, 0, PAGE_SIZE);
3378                SetPageUptodate(page);
3379        } else {
3380                if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3381                                DATA_GENERIC_ENHANCE_READ)) {
3382                        err = -EFSCORRUPTED;
3383                        goto fail;
3384                }
3385                err = f2fs_submit_page_read(inode, page, blkaddr, true);
3386                if (err)
3387                        goto fail;
3388
3389                lock_page(page);
3390                if (unlikely(page->mapping != mapping)) {
3391                        f2fs_put_page(page, 1);
3392                        goto repeat;
3393                }
3394                if (unlikely(!PageUptodate(page))) {
3395                        err = -EIO;
3396                        goto fail;
3397                }
3398        }
3399        return 0;
3400
3401fail:
3402        f2fs_put_page(page, 1);
3403        f2fs_write_failed(mapping, pos + len);
3404        if (drop_atomic)
3405                f2fs_drop_inmem_pages_all(sbi, false);
3406        return err;
3407}
3408
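    /*
     * .write_end counterpart: mark the page dirty, push i_size forward
     * when the write extended the file, and drop the page reference
     * taken in f2fs_write_begin().
     */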
3409static int f2fs_write_end(struct file *file,
3410                        struct address_space *mapping,
3411                        loff_t pos, unsigned len, unsigned copied,
3412                        struct page *page, void *fsdata)
3413{
3414        struct inode *inode = page->mapping->host;
3415
3416        trace_f2fs_write_end(inode, pos, len, copied);
3417
3418        /*
3419         * This should come from len == PAGE_SIZE, and we expect copied
3420         * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3421         * let generic_perform_write() try to copy data again via copied=0.
3422         */
3423        if (!PageUptodate(page)) {
3424                if (unlikely(copied != len))
3425                        copied = 0;
3426                else
3427                        SetPageUptodate(page);
3428        }
3429
3430#ifdef CONFIG_F2FS_FS_COMPRESSION
3431        /* overwrite compressed file */
3432        if (f2fs_compressed_file(inode) && fsdata) {
3433                f2fs_compress_write_end(inode, fsdata, page->index, copied);
3434                f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3435                return copied;
3436        }
3437#endif
3438
3439        if (!copied)
3440                goto unlock_out;
3441
3442        set_page_dirty(page);
3443
3444        if (pos + copied > i_size_read(inode) &&
3445            !f2fs_verity_in_progress(inode))
3446                f2fs_i_size_write(inode, pos + copied);
3447unlock_out:
3448        f2fs_put_page(page, 1);
3449        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3450        return copied;
3451}
3452
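    /*
     * Returns 0 if the request is aligned to the filesystem block size,
     * 1 if it is only aligned to the bdev logical block size (the caller
     * then falls back to buffered IO), or -EINVAL on misalignment.
     */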
3453static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3454                           loff_t offset)
3455{
3456        unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3457        unsigned blkbits = i_blkbits;
3458        unsigned blocksize_mask = (1 << blkbits) - 1;
3459        unsigned long align = offset | iov_iter_alignment(iter);
3460        struct block_device *bdev = inode->i_sb->s_bdev;
3461
3462        if (align & blocksize_mask) {
3463                if (bdev)
3464                        blkbits = blksize_bits(bdev_logical_block_size(bdev));
3465                blocksize_mask = (1 << blkbits) - 1;
3466                if (align & blocksize_mask)
3467                        return -EINVAL;
3468                return 1;
3469        }
3470        return 0;
3471}
3472
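    /*
     * Submission/completion hooks for __blockdev_direct_IO(): each bio
     * is wrapped in an f2fs_private_dio so in-flight DIO pages can be
     * counted, and the original bi_end_io/bi_private are restored when
     * the bio completes.
     */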
3473static void f2fs_dio_end_io(struct bio *bio)
3474{
3475        struct f2fs_private_dio *dio = bio->bi_private;
3476
3477        dec_page_count(F2FS_I_SB(dio->inode),
3478                        dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3479
3480        bio->bi_private = dio->orig_private;
3481        bio->bi_end_io = dio->orig_end_io;
3482
3483        kvfree(dio);
3484
3485        bio_endio(bio);
3486}
3487
3488static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3489                                                        loff_t file_offset)
3490{
3491        struct f2fs_private_dio *dio;
3492        bool write = (bio_op(bio) == REQ_OP_WRITE);
3493
3494        dio = f2fs_kzalloc(F2FS_I_SB(inode),
3495                        sizeof(struct f2fs_private_dio), GFP_NOFS);
3496        if (!dio)
3497                goto out;
3498
3499        dio->inode = inode;
3500        dio->orig_end_io = bio->bi_end_io;
3501        dio->orig_private = bio->bi_private;
3502        dio->write = write;
3503
3504        bio->bi_end_io = f2fs_dio_end_io;
3505        bio->bi_private = dio;
3506
3507        inc_page_count(F2FS_I_SB(inode),
3508                        write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3509
3510        submit_bio(bio);
3511        return;
3512out:
3513        bio->bi_status = BLK_STS_IOERR;
3514        bio_endio(bio);
3515}
3516
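    /*
     * Direct IO entry point. Lock ordering is i_gc_rwsem[rw] first, then
     * i_gc_rwsem[READ] for out-of-place (OPU) writes; under IOCB_NOWAIT
     * both are trylocked and -EAGAIN is returned on contention. With
     * whint_mode off, the write hint is masked for the duration of a
     * DIO write and restored afterwards.
     */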
3517static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3518{
3519        struct address_space *mapping = iocb->ki_filp->f_mapping;
3520        struct inode *inode = mapping->host;
3521        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3522        struct f2fs_inode_info *fi = F2FS_I(inode);
3523        size_t count = iov_iter_count(iter);
3524        loff_t offset = iocb->ki_pos;
3525        int rw = iov_iter_rw(iter);
3526        int err;
3527        enum rw_hint hint = iocb->ki_hint;
3528        int whint_mode = F2FS_OPTION(sbi).whint_mode;
3529        bool do_opu;
3530
3531        err = check_direct_IO(inode, iter, offset);
3532        if (err)
3533                return err < 0 ? err : 0;
3534
3535        if (f2fs_force_buffered_io(inode, iocb, iter))
3536                return 0;
3537
3538        do_opu = allow_outplace_dio(inode, iocb, iter);
3539
3540        trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3541
3542        if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3543                iocb->ki_hint = WRITE_LIFE_NOT_SET;
3544
3545        if (iocb->ki_flags & IOCB_NOWAIT) {
3546                if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3547                        iocb->ki_hint = hint;
3548                        err = -EAGAIN;
3549                        goto out;
3550                }
3551                if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3552                        up_read(&fi->i_gc_rwsem[rw]);
3553                        iocb->ki_hint = hint;
3554                        err = -EAGAIN;
3555                        goto out;
3556                }
3557        } else {
3558                down_read(&fi->i_gc_rwsem[rw]);
3559                if (do_opu)
3560                        down_read(&fi->i_gc_rwsem[READ]);
3561        }
3562
3563        err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3564                        iter, rw == WRITE ? get_data_block_dio_write :
3565                        get_data_block_dio, NULL, f2fs_dio_submit_bio,
3566                        rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3567                        DIO_SKIP_HOLES);
3568
3569        if (do_opu)
3570                up_read(&fi->i_gc_rwsem[READ]);
3571
3572        up_read(&fi->i_gc_rwsem[rw]);
3573
3574        if (rw == WRITE) {
3575                if (whint_mode == WHINT_MODE_OFF)
3576                        iocb->ki_hint = hint;
3577                if (err > 0) {
3578                        f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3579                                                                        err);
3580                        if (!do_opu)
3581                                set_inode_flag(inode, FI_UPDATE_WRITE);
3582                } else if (err < 0) {
3583                        f2fs_write_failed(mapping, offset + count);
3584                }
3585        } else {
3586                if (err > 0)
3587                        f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3588        }
3589
3590out:
3591        trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3592
3593        return err;
3594}
3595
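    /*
     * Page-cache invalidation hook: fix up the dirty-page accounting for
     * meta, node and data pages, and route atomic-written pages through
     * f2fs_drop_inmem_page() instead of clearing them directly.
     */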
3596void f2fs_invalidate_page(struct page *page, unsigned int offset,
3597                                                        unsigned int length)
3598{
3599        struct inode *inode = page->mapping->host;
3600        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3601
3602        if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3603                (offset % PAGE_SIZE || length != PAGE_SIZE))
3604                return;
3605
3606        if (PageDirty(page)) {
3607                if (inode->i_ino == F2FS_META_INO(sbi)) {
3608                        dec_page_count(sbi, F2FS_DIRTY_META);
3609                } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3610                        dec_page_count(sbi, F2FS_DIRTY_NODES);
3611                } else {
3612                        inode_dec_dirty_pages(inode);
3613                        f2fs_remove_dirty_inode(inode);
3614                }
3615        }
3616
3617        clear_cold_data(page);
3618
3619        if (IS_ATOMIC_WRITTEN_PAGE(page))
3620                return f2fs_drop_inmem_page(inode, page);
3621
3622        f2fs_clear_page_private(page);
3623}
3624
3625int f2fs_release_page(struct page *page, gfp_t wait)
3626{
3627        /* If this is a dirty page, keep PagePrivate */
3628        if (PageDirty(page))
3629                return 0;
3630
3631        /* This is an atomic-written page, keep it Private */
3632        if (IS_ATOMIC_WRITTEN_PAGE(page))
3633                return 0;
3634
3635        clear_cold_data(page);
3636        f2fs_clear_page_private(page);
3637        return 1;
3638}
3639
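    /*
     * Dirtying a page of an atomic file registers it on the inmem list
     * instead of tagging it dirty, so it is written out only at commit
     * time; all other pages go through __set_page_dirty_nobuffers().
     */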
3640static int f2fs_set_data_page_dirty(struct page *page)
3641{
3642        struct inode *inode = page_file_mapping(page)->host;
3643
3644        trace_f2fs_set_page_dirty(page, DATA);
3645
3646        if (!PageUptodate(page))
3647                SetPageUptodate(page);
3648        if (PageSwapCache(page))
3649                return __set_page_dirty_nobuffers(page);
3650
3651        if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3652                if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
3653                        f2fs_register_inmem_page(inode, page);
3654                        return 1;
3655                }
3656                /*
3657                 * This page has already been registered; just
3658                 * return here.
3659                 */
3660                return 0;
3661        }
3662
3663        if (!PageDirty(page)) {
3664                __set_page_dirty_nobuffers(page);
3665                f2fs_update_dirty_page(inode, page);
3666                return 1;
3667        }
3668        return 0;
3669}
3670
3671
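    /*
     * Map one logical block inside a compressed cluster to its on-disk
     * address: only raw (non-compressed) clusters carry a meaningful
     * per-block address, so anything else reports 0 (unmapped).
     */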
3672static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3673{
3674#ifdef CONFIG_F2FS_FS_COMPRESSION
3675        struct dnode_of_data dn;
3676        sector_t start_idx, blknr = 0;
3677        int ret;
3678
3679        start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3680
3681        set_new_dnode(&dn, inode, NULL, NULL, 0);
3682        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3683        if (ret)
3684                return 0;
3685
3686        if (dn.data_blkaddr != COMPRESS_ADDR) {
3687                dn.ofs_in_node += block - start_idx;
3688                blknr = f2fs_data_blkaddr(&dn);
3689                if (!__is_valid_data_blkaddr(blknr))
3690                        blknr = 0;
3691        }
3692
3693        f2fs_put_dnode(&dn);
3694
3695        return blknr;
3696#else
3697        return -EOPNOTSUPP;
3698#endif
3699}
3700
3701
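    /*
     * .bmap implementation, reached from userspace via the FIBMAP ioctl
     * (CAP_SYS_RAWIO), roughly (illustrative only):
     *
     *	int blk = logical_blk;
     *	ioctl(fd, FIBMAP, &blk);	now blk is the physical block, 0 if unmapped
     *
     * Dirty pages are flushed first so the result reflects allocated
     * blocks; inline-data inodes have no block mapping, hence return 0.
     */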
3702static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3703{
3704        struct inode *inode = mapping->host;
3705
3706        if (f2fs_has_inline_data(inode))
3707                return 0;
3708
3709        /* make sure all the blocks are allocated */
3710        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3711                filemap_write_and_wait(mapping);
3712
3713        if (f2fs_compressed_file(inode))
3714                return f2fs_bmap_compress(inode, block);
3715
3716        return generic_block_bmap(mapping, block, get_data_block_bmap);
3717}
3718
3719#ifdef CONFIG_MIGRATION
3720#include <linux/migrate.h>
3721
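    /*
     * Page migration: atomic-written pages carry an extra reference and
     * sit on fi->inmem_pages, so migration must hold inmem_lock and
     * repoint the matching list entry at the new page.
     */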
3722int f2fs_migrate_page(struct address_space *mapping,
3723                struct page *newpage, struct page *page, enum migrate_mode mode)
3724{
3725        int rc, extra_count;
3726        struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3727        bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
3728
3729        BUG_ON(PageWriteback(page));
3730
3731        /* migrating an atomic written page is safe with the inmem_lock held */
3732        if (atomic_written) {
3733                if (mode != MIGRATE_SYNC)
3734                        return -EBUSY;
3735                if (!mutex_trylock(&fi->inmem_lock))
3736                        return -EAGAIN;
3737        }
3738
3739        /* one extra reference was held for the atomic_write page */
3740        extra_count = atomic_written ? 1 : 0;
3741        rc = migrate_page_move_mapping(mapping, newpage,
3742                                page, extra_count);
3743        if (rc != MIGRATEPAGE_SUCCESS) {
3744                if (atomic_written)
3745                        mutex_unlock(&fi->inmem_lock);
3746                return rc;
3747        }
3748
3749        if (atomic_written) {
3750                struct inmem_pages *cur;
3751                list_for_each_entry(cur, &fi->inmem_pages, list)
3752                        if (cur->page == page) {
3753                                cur->page = newpage;
3754                                break;
3755                        }
3756                mutex_unlock(&fi->inmem_lock);
3757                put_page(page);
3758                get_page(newpage);
3759        }
3760
3761        if (PagePrivate(page)) {
3762                f2fs_set_page_private(newpage, page_private(page));
3763                f2fs_clear_page_private(page);
3764        }
3765
3766        if (mode != MIGRATE_SYNC_NO_COPY)
3767                migrate_page_copy(newpage, page);
3768        else
3769                migrate_page_states(newpage, page);
3770
3771        return MIGRATEPAGE_SUCCESS;
3772}
3773#endif
3774
3775#ifdef CONFIG_SWAP
3776/* Copied from generic_swapfile_activate() to check for any holes */
3777static int check_swap_activate(struct swap_info_struct *sis,
3778                                struct file *swap_file, sector_t *span)
3779{
3780        struct address_space *mapping = swap_file->f_mapping;
3781        struct inode *inode = mapping->host;
3782        unsigned blocks_per_page;
3783        unsigned long page_no;
3784        unsigned blkbits;
3785        sector_t probe_block;
3786        sector_t last_block;
3787        sector_t lowest_block = -1;
3788        sector_t highest_block = 0;
3789        int nr_extents = 0;
3790        int ret;
3791
3792        blkbits = inode->i_blkbits;
3793        blocks_per_page = PAGE_SIZE >> blkbits;
3794
3795        /*
3796         * Map all the blocks into the extent list.  This code doesn't try
3797         * to be very smart.
3798         */
3799        probe_block = 0;
3800        page_no = 0;
3801        last_block = i_size_read(inode) >> blkbits;
3802        while ((probe_block + blocks_per_page) <= last_block &&
3803                        page_no < sis->max) {
3804                unsigned block_in_page;
3805                sector_t first_block;
3806                sector_t block = 0;
3807                int      err = 0;
3808
3809                cond_resched();
3810
3811                block = probe_block;
3812                err = bmap(inode, &block);
3813                if (err || !block)
3814                        goto bad_bmap;
3815                first_block = block;
3816
3817                /*
3818                 * It must be PAGE_SIZE aligned on-disk
3819                 */
3820                if (first_block & (blocks_per_page - 1)) {
3821                        probe_block++;
3822                        goto reprobe;
3823                }
3824
3825                for (block_in_page = 1; block_in_page < blocks_per_page;
3826                                        block_in_page++) {
3827
3828                        block = probe_block + block_in_page;
3829                        err = bmap(inode, &block);
3830
3831                        if (err || !block)
3832                                goto bad_bmap;
3833
3834                        if (block != first_block + block_in_page) {
3835                                /* Discontiguity */
3836                                probe_block++;
3837                                goto reprobe;
3838                        }
3839                }
3840
3841                first_block >>= (PAGE_SHIFT - blkbits);
3842                if (page_no) {  /* exclude the header page */
3843                        if (first_block < lowest_block)
3844                                lowest_block = first_block;
3845                        if (first_block > highest_block)
3846                                highest_block = first_block;
3847                }
3848
3849                /*
3850                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
3851                 */
3852                ret = add_swap_extent(sis, page_no, 1, first_block);
3853                if (ret < 0)
3854                        goto out;
3855                nr_extents += ret;
3856                page_no++;
3857                probe_block += blocks_per_page;
3858reprobe:
3859                continue;
3860        }
3861        ret = nr_extents;
3862        *span = 1 + highest_block - lowest_block;
3863        if (page_no == 0)
3864                page_no = 1;    /* force Empty message */
3865        sis->max = page_no;
3866        sis->pages = page_no - 1;
3867        sis->highest_bit = page_no - 1;
3868out:
3869        return ret;
3870bad_bmap:
3871        pr_err("swapon: swapfile has holes\n");
3872        return -EINVAL;
3873}
3874
3875static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3876                                sector_t *span)
3877{
3878        struct inode *inode = file_inode(file);
3879        int ret;
3880
3881        if (!S_ISREG(inode->i_mode))
3882                return -EINVAL;
3883
3884        if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3885                return -EROFS;
3886
3887        ret = f2fs_convert_inline_inode(inode);
3888        if (ret)
3889                return ret;
3890
3891        if (f2fs_disable_compressed_file(inode))
3892                return -EINVAL;
3893
3894        ret = check_swap_activate(sis, file, span);
3895        if (ret < 0)
3896                return ret;
3897
3898        set_inode_flag(inode, FI_PIN_FILE);
3899        f2fs_precache_extents(inode);
3900        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3901        return ret;
3902}
3903
3904static void f2fs_swap_deactivate(struct file *file)
3905{
3906        struct inode *inode = file_inode(file);
3907
3908        clear_inode_flag(inode, FI_PIN_FILE);
3909}
3910#else
3911static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3912                                sector_t *span)
3913{
3914        return -EOPNOTSUPP;
3915}
3916
3917static void f2fs_swap_deactivate(struct file *file)
3918{
3919}
3920#endif
3921
3922const struct address_space_operations f2fs_dblock_aops = {
3923        .readpage       = f2fs_read_data_page,
3924        .readahead      = f2fs_readahead,
3925        .writepage      = f2fs_write_data_page,
3926        .writepages     = f2fs_write_data_pages,
3927        .write_begin    = f2fs_write_begin,
3928        .write_end      = f2fs_write_end,
3929        .set_page_dirty = f2fs_set_data_page_dirty,
3930        .invalidatepage = f2fs_invalidate_page,
3931        .releasepage    = f2fs_release_page,
3932        .direct_IO      = f2fs_direct_IO,
3933        .bmap           = f2fs_bmap,
3934        .swap_activate  = f2fs_swap_activate,
3935        .swap_deactivate = f2fs_swap_deactivate,
3936#ifdef CONFIG_MIGRATION
3937        .migratepage    = f2fs_migrate_page,
3938#endif
3939};
3940
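    /*
     * Clear only the PAGECACHE_TAG_DIRTY xarray mark for this page,
     * leaving PageDirty itself untouched, so that tag-based writeback
     * lookups skip it.
     */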
3941void f2fs_clear_page_cache_dirty_tag(struct page *page)
3942{
3943        struct address_space *mapping = page_mapping(page);
3944        unsigned long flags;
3945
3946        xa_lock_irqsave(&mapping->i_pages, flags);
3947        __xa_clear_mark(&mapping->i_pages, page_index(page),
3948                                                PAGECACHE_TAG_DIRTY);
3949        xa_unlock_irqrestore(&mapping->i_pages, flags);
3950}
3951
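    /*
     * Post-read processing (decryption, fs-verity, decompression) needs
     * a per-bio context; backing the slab with a mempool of
     * NUM_PREALLOC_POST_READ_CTXS entries guarantees forward progress
     * under memory pressure.
     */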
3952int __init f2fs_init_post_read_processing(void)
3953{
3954        bio_post_read_ctx_cache =
3955                kmem_cache_create("f2fs_bio_post_read_ctx",
3956                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
3957        if (!bio_post_read_ctx_cache)
3958                goto fail;
3959        bio_post_read_ctx_pool =
3960                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
3961                                         bio_post_read_ctx_cache);
3962        if (!bio_post_read_ctx_pool)
3963                goto fail_free_cache;
3964        return 0;
3965
3966fail_free_cache:
3967        kmem_cache_destroy(bio_post_read_ctx_cache);
3968fail:
3969        return -ENOMEM;
3970}
3971
3972void f2fs_destroy_post_read_processing(void)
3973{
3974        mempool_destroy(bio_post_read_ctx_pool);
3975        kmem_cache_destroy(bio_post_read_ctx_cache);
3976}
3977
3978int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
3979{
3980        if (!f2fs_sb_has_encrypt(sbi) &&
3981                !f2fs_sb_has_verity(sbi) &&
3982                !f2fs_sb_has_compression(sbi))
3983                return 0;
3984
3985        sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
3986                                                 WQ_UNBOUND | WQ_HIGHPRI,
3987                                                 num_online_cpus());
3988        if (!sbi->post_read_wq)
3989                return -ENOMEM;
3990        return 0;
3991}
3992
3993void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
3994{
3995        if (sbi->post_read_wq)
3996                destroy_workqueue(sbi->post_read_wq);
3997}
3998
3999int __init f2fs_init_bio_entry_cache(void)
4000{
4001        bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4002                        sizeof(struct bio_entry));
4003        if (!bio_entry_slab)
4004                return -ENOMEM;
4005        return 0;
4006}
4007
4008void f2fs_destroy_bio_entry_cache(void)
4009{
4010        kmem_cache_destroy(bio_entry_slab);
4011}
4012