// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE      NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
        if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
                                        0, BIOSET_NEED_BVECS))
                return -ENOMEM;
        return 0;
}

void f2fs_destroy_bioset(void)
{
        bioset_exit(&f2fs_bioset);
}

static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
                                                unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
}

struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
{
        if (noio) {
                /* No failure on bio allocation */
                return __f2fs_bio_alloc(GFP_NOIO, npages);
        }

        if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
                f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
                return NULL;
        }

        return __f2fs_bio_alloc(GFP_KERNEL, npages);
}

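/*
 * Return true if writeback of @page must complete before the next
 * checkpoint, i.e. the page is accounted as F2FS_WB_CP_DATA rather than
 * F2FS_WB_DATA (see WB_DATA_TYPE()).
 */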
static bool __is_cp_guaranteed(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;
        struct f2fs_sb_info *sbi;

        if (!mapping)
                return false;

        if (f2fs_is_compressed_page(page))
                return false;

        inode = mapping->host;
        sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_META_INO(sbi) ||
                        inode->i_ino == F2FS_NODE_INO(sbi) ||
                        S_ISDIR(inode->i_mode) ||
                        (S_ISREG(inode->i_mode) &&
                        (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
                        is_cold_data(page))
                return true;
        return false;
}

static enum count_type __read_io_type(struct page *page)
{
        struct address_space *mapping = page_file_mapping(page);

        if (mapping) {
                struct inode *inode = mapping->host;
                struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                if (inode->i_ino == F2FS_META_INO(sbi))
                        return F2FS_RD_META;

                if (inode->i_ino == F2FS_NODE_INO(sbi))
                        return F2FS_RD_NODE;
        }
        return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_DECRYPT,
        STEP_DECOMPRESS_NOWQ,           /* handle normal cluster data inplace */
        STEP_DECOMPRESS,                /* handle compressed cluster data in workqueue */
        STEP_VERITY,
};

struct bio_post_read_ctx {
        struct bio *bio;
        struct f2fs_sb_info *sbi;
        struct work_struct work;
        unsigned int enabled_steps;
};
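
/*
 * Illustrative example: a read of an encrypted, verity-protected file
 * carries
 *
 *      ctx->enabled_steps = (1 << STEP_DECRYPT) | (1 << STEP_VERITY);
 *
 * and bio_post_read_processing() dispatches first to the per-sb
 * post_read_wq for decryption, then to the fs-verity workqueue.
 */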

static void __read_end_io(struct bio *bio, bool compr, bool verity)
{
        struct page *page;
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                page = bv->bv_page;

#ifdef CONFIG_F2FS_FS_COMPRESSION
                if (compr && f2fs_is_compressed_page(page)) {
                        f2fs_decompress_pages(bio, page, verity);
                        continue;
                }
                if (verity)
                        continue;
#endif

                /* PG_error was set if any post_read step failed */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will re-read again later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                dec_page_count(F2FS_P_SB(page), __read_io_type(page));
                unlock_page(page);
        }
}

static void f2fs_release_read_bio(struct bio *bio);
static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
{
        if (!compr)
                __read_end_io(bio, false, verity);
        f2fs_release_read_bio(bio);
}

static void f2fs_decompress_bio(struct bio *bio, bool verity)
{
        __read_end_io(bio, true, verity);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
{
        fscrypt_decrypt_bio(ctx->bio);
}

static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
{
        f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
{
        f2fs_decompress_end_io(rpages, cluster_size, false, true);
}

static void f2fs_verify_bio(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                struct page *page = bv->bv_page;
                struct decompress_io_ctx *dic;

                dic = (struct decompress_io_ctx *)page_private(page);

                if (dic) {
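                        /*
                         * Only the holder of the last reference verifies
                         * and frees the whole cluster; everyone else just
                         * drops their reference and moves on.
                         */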
                        if (refcount_dec_not_one(&dic->ref))
                                continue;
                        f2fs_verify_pages(dic->rpages,
                                                dic->cluster_size);
                        f2fs_free_dic(dic);
                        continue;
                }

                if (bio->bi_status || PageError(page))
                        goto clear_uptodate;

                if (fsverity_verify_page(page)) {
                        SetPageUptodate(page);
                        goto unlock;
                }
clear_uptodate:
                ClearPageUptodate(page);
                ClearPageError(page);
unlock:
                dec_page_count(F2FS_P_SB(page), __read_io_type(page));
                unlock_page(page);
        }
}
#endif

static void f2fs_verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;
#ifdef CONFIG_F2FS_FS_COMPRESSION
        unsigned int enabled_steps = ctx->enabled_steps;
#endif

        /*
         * fsverity_verify_bio() may call readpages() again, and while verity
         * will be disabled for this, decryption may still be needed, resulting
         * in another bio_post_read_ctx being allocated.  So to prevent
         * deadlocks we need to release the current ctx to the mempool first.
         * This assumes that verity is the last post-read step.
         */
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /* previous step is decompression */
        if (enabled_steps & (1 << STEP_DECOMPRESS)) {
                f2fs_verify_bio(bio);
                f2fs_release_read_bio(bio);
                return;
        }
#endif

        fsverity_verify_bio(bio);
        __f2fs_read_end_io(bio, false, false);
}

static void f2fs_post_read_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        if (ctx->enabled_steps & (1 << STEP_DECRYPT))
                f2fs_decrypt_work(ctx);

        if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
                f2fs_decompress_work(ctx);

        if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                INIT_WORK(&ctx->work, f2fs_verity_work);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }

        __f2fs_read_end_io(ctx->bio,
                ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
}

static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
                                                struct work_struct *work)
{
        queue_work(sbi->post_read_wq, work);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */

        if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
                ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
                INIT_WORK(&ctx->work, f2fs_post_read_work);
                f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
                return;
        }

        if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                INIT_WORK(&ctx->work, f2fs_verity_work);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }

        __f2fs_read_end_io(ctx->bio, false, false);
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
        return bio->bi_private;
}

static void f2fs_read_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));

        if (time_to_inject(sbi, FAULT_READ_IO)) {
                f2fs_show_injection_info(sbi, FAULT_READ_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        if (f2fs_bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                bio_post_read_processing(ctx);
                return;
        }

        __f2fs_read_end_io(bio, false, false);
}

static void f2fs_write_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi = bio->bi_private;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (time_to_inject(sbi, FAULT_WRITE_IO)) {
                f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                enum count_type type = WB_DATA_TYPE(page);

                if (IS_DUMMY_WRITTEN_PAGE(page)) {
                        set_page_private(page, (unsigned long)NULL);
                        ClearPagePrivate(page);
                        unlock_page(page);
                        mempool_free(page, sbi->write_io_dummy);

                        if (unlikely(bio->bi_status))
                                f2fs_stop_checkpoint(sbi, true);
                        continue;
                }

                fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
                if (f2fs_is_compressed_page(page)) {
                        f2fs_compress_write_end_io(bio, page);
                        continue;
                }
#endif

                if (unlikely(bio->bi_status)) {
                        mapping_set_error(page->mapping, -EIO);
                        if (type == F2FS_WB_CP_DATA)
                                f2fs_stop_checkpoint(sbi, true);
                }

                f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
                                        page->index != nid_of_node(page));

                dec_page_count(sbi, type);
                if (f2fs_in_warm_node_list(sbi, page))
                        f2fs_del_fsync_node_entry(sbi, page);
                clear_cold_data(page);
                end_page_writeback(page);
        }
        if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
                                wq_has_sleeper(&sbi->cp_wait))
                wake_up(&sbi->cp_wait);

        bio_put(bio);
}

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        int i;

        if (f2fs_is_multi_device(sbi)) {
                for (i = 0; i < sbi->s_ndevs; i++) {
                        if (FDEV(i).start_blk <= blk_addr &&
                            FDEV(i).end_blk >= blk_addr) {
                                blk_addr -= FDEV(i).start_blk;
                                bdev = FDEV(i).bdev;
                                break;
                        }
                }
        }
        if (bio) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        }
        return bdev;
}
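
/*
 * Illustrative example: on a two-device setup where FDEV(1) spans blocks
 * [1024, 2047], a global blk_addr of 1500 is translated above to block 476
 * relative to FDEV(1).bdev before the bio is targeted at that device.
 */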

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return 0;

        for (i = 0; i < sbi->s_ndevs; i++)
                if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
                        return i;
        return 0;
}

/*
 * Return true if the bio's bdev is the same as its target device.
 */
static bool __same_bdev(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
{
        struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
        return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        struct bio *bio;

        bio = f2fs_bio_alloc(sbi, npages, true);

        f2fs_target_device(sbi, fio->new_blkaddr, bio);
        if (is_read_io(fio->op)) {
                bio->bi_end_io = f2fs_read_end_io;
                bio->bi_private = NULL;
        } else {
                bio->bi_end_io = f2fs_write_end_io;
                bio->bi_private = sbi;
                bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
                                                fio->type, fio->temp);
        }
        if (fio->io_wbc)
                wbc_init_bio(fio->io_wbc, bio);

        return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                                  pgoff_t first_idx,
                                  const struct f2fs_io_info *fio,
                                  gfp_t gfp_mask)
{
        /*
         * The f2fs garbage collector sets ->encrypted_page when it wants to
         * read/write raw data without encryption.
         */
        if (!fio || !fio->encrypted_page)
                fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                                     pgoff_t next_idx,
                                     const struct f2fs_io_info *fio)
{
        /*
         * The f2fs garbage collector sets ->encrypted_page when it wants to
         * read/write raw data without encryption.
         */
        if (fio && fio->encrypted_page)
                return !bio_has_crypt_ctx(bio);

        return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        if (!is_read_io(bio_op(bio))) {
                unsigned int start;

                if (type != DATA && type != NODE)
                        goto submit_io;

                if (f2fs_lfs_mode(sbi) && current->plug)
                        blk_finish_plug(current->plug);

                if (!F2FS_IO_ALIGNED(sbi))
                        goto submit_io;

                start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
                start %= F2FS_IO_SIZE(sbi);

                if (start == 0)
                        goto submit_io;

                /* fill dummy pages */
                for (; start < F2FS_IO_SIZE(sbi); start++) {
                        struct page *page =
                                mempool_alloc(sbi->write_io_dummy,
                                              GFP_NOIO | __GFP_NOFAIL);
                        f2fs_bug_on(sbi, !page);

                        zero_user_segment(page, 0, PAGE_SIZE);
                        SetPagePrivate(page);
                        set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
                        lock_page(page);
                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                                f2fs_bug_on(sbi, 1);
                }
                /*
                 * In the NODE case, we lose the next block address chain,
                 * so we need to do a checkpoint in f2fs_sync_file.
                 */
                if (type == NODE)
                        set_sbi_flag(sbi, SBI_NEED_CP);
        }
submit_io:
        if (is_read_io(bio_op(bio)))
                trace_f2fs_submit_read_bio(sbi->sb, type, bio);
        else
                trace_f2fs_submit_write_bio(sbi->sb, type, bio);
        submit_bio(bio);
}
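
/*
 * Illustrative example: with F2FS_IO_SIZE(sbi) == 8 blocks, a write bio
 * holding 5 pages gets 3 zeroed dummy pages appended above, so the device
 * always sees I/O in aligned 8-block units (IO_ALIGNED mode only).
 */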

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        __submit_bio(sbi, bio, type);
}

static void __attach_io_flag(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
        unsigned int io_flag, fua_flag, meta_flag;

        if (fio->type == DATA)
                io_flag = sbi->data_io_flag;
        else if (fio->type == NODE)
                io_flag = sbi->node_io_flag;
        else
                return;

        fua_flag = io_flag & temp_mask;
        meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

        /*
         * data/node io flag bits per temp:
         *      REQ_META     |      REQ_FUA      |
         *    5 |    4 |   3 |    2 |    1 |   0 |
         * Cold | Warm | Hot | Cold | Warm | Hot |
         */
        if ((1 << fio->temp) & meta_flag)
                fio->op_flags |= REQ_META;
        if ((1 << fio->temp) & fua_flag)
                fio->op_flags |= REQ_FUA;
}
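
/*
 * Illustrative example: data_io_flag = 0b001010 gives fua_flag = 0b010 and
 * meta_flag = 0b001, so warm data writes get REQ_FUA and hot data writes
 * get REQ_META (NR_TEMP_TYPE == 3: hot/warm/cold).
 */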

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
        struct f2fs_io_info *fio = &io->fio;

        if (!io->bio)
                return;

        __attach_io_flag(fio);
        bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

        if (is_read_io(fio->op))
                trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
        else
                trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

        __submit_bio(io->sbi, io->bio, fio->type);
        io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
                                                struct page *page, nid_t ino)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (!bio)
                return false;

        if (!inode && !page && !ino)
                return true;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *target = bvec->bv_page;

                if (fscrypt_is_bounce_page(target)) {
                        target = fscrypt_pagecache_page(target);
                        if (IS_ERR(target))
                                continue;
                }
                if (f2fs_is_compressed_page(target)) {
                        target = f2fs_compress_control_page(target);
                        if (IS_ERR(target))
                                continue;
                }

                if (inode && inode == target->mapping->host)
                        return true;
                if (page && page == target)
                        return true;
                if (ino && ino == ino_of_node(target))
                        return true;
        }

        return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
                                enum page_type type, enum temp_type temp)
{
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

        down_write(&io->io_rwsem);

        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                io->fio.op = REQ_OP_WRITE;
                io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
                if (!test_opt(sbi, NOBARRIER))
                        io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
        }
        __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type, bool force)
{
        enum temp_type temp;
        bool ret = true;

        for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
                if (!force) {
                        enum page_type btype = PAGE_TYPE_OF_BIO(type);
                        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

                        down_read(&io->io_rwsem);
                        ret = __has_merged_page(io->bio, inode, page, ino);
                        up_read(&io->io_rwsem);
                }
                if (ret)
                        __f2fs_submit_merged_write(sbi, type, temp);

                /* TODO: use HOT temp only for meta pages now. */
                if (type >= META)
                        break;
        }
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
        __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type)
{
        __submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
        f2fs_submit_merged_write(sbi, DATA);
        f2fs_submit_merged_write(sbi, NODE);
        f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data from the given block address.
 * The caller must unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        fio->is_por ? META_POR : (__is_meta_io(fio) ?
                        META_GENERIC : DATA_GENERIC_ENHANCE)))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);

        /* Allocate a new bio */
        bio = __bio_alloc(fio, 1);

        f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                               fio->page->index, fio, GFP_NOIO);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }

        if (fio->io_wbc && !is_read_io(fio->op))
                wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

        __attach_io_flag(fio);
        bio_set_op_attrs(bio, fio->op, fio->op_flags);

        inc_page_count(fio->sbi, is_read_io(fio->op) ?
                        __read_io_type(page) : WB_DATA_TYPE(fio->page));

        __submit_bio(fio->sbi, bio, fio->type);
        return 0;
}

static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                block_t last_blkaddr, block_t cur_blkaddr)
{
        if (last_blkaddr + 1 != cur_blkaddr)
                return false;
        return __same_bdev(sbi, cur_blkaddr, bio);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
                                                struct f2fs_io_info *fio)
{
        if (io->fio.op != fio->op)
                return false;
        return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                        struct f2fs_bio_info *io,
                                        struct f2fs_io_info *fio,
                                        block_t last_blkaddr,
                                        block_t cur_blkaddr)
{
        if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
                unsigned int filled_blocks =
                                F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
                unsigned int io_size = F2FS_IO_SIZE(sbi);
                unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

                /* IOs in the bio are aligned and the remaining vector space is insufficient */
                if (!(filled_blocks % io_size) && left_vecs < io_size)
                        return false;
        }
        if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
                return false;
        return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
                                struct page *page, enum temp_type temp)
{
        struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
        struct bio_entry *be;

        be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
        be->bio = bio;
        bio_get(bio);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
                f2fs_bug_on(sbi, 1);

        down_write(&io->bio_list_lock);
        list_add_tail(&be->list, &io->bio_list);
        up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
        list_del(&be->list);
        kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
                                                        struct page *page)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum temp_type temp;
        bool found = false;
        int ret = -EAGAIN;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (be->bio != *bio)
                                continue;

                        found = true;

                        f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
                                                            *fio->last_block,
                                                            fio->new_blkaddr));
                        if (f2fs_crypt_mergeable_bio(*bio,
                                        fio->page->mapping->host,
                                        fio->page->index, fio) &&
                            bio_add_page(*bio, page, PAGE_SIZE, 0) ==
                                        PAGE_SIZE) {
                                ret = 0;
                                break;
                        }

                        /* page can't be merged into bio; submit the bio */
                        del_bio_entry(be);
                        __submit_bio(sbi, *bio, DATA);
                        break;
                }
                up_write(&io->bio_list_lock);
        }

        if (ret) {
                bio_put(*bio);
                *bio = NULL;
        }

        return ret;
}

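/*
 * Scan the per-temp IPU bio lists for @*bio (or, if NULL, for a bio that
 * contains @page). The lists are scanned twice: first under the read lock
 * as a cheap existence check, then again under the write lock so the
 * matching entry can be removed before the bio is submitted.
 */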
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
                                        struct bio **bio, struct page *page)
{
        enum temp_type temp;
        bool found = false;
        struct bio *target = bio ? *bio : NULL;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                if (list_empty(head))
                        continue;

                down_read(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found)
                                break;
                }
                up_read(&io->bio_list_lock);

                if (!found)
                        continue;

                found = false;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found) {
                                target = be->bio;
                                del_bio_entry(be);
                                break;
                        }
                }
                up_write(&io->bio_list_lock);
        }

        if (found)
                __submit_bio(sbi, target, DATA);
        if (bio && *bio) {
                bio_put(*bio);
                *bio = NULL;
        }
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio = *fio->bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);

        if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
                                                fio->new_blkaddr))
                f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
        if (!bio) {
                bio = __bio_alloc(fio, BIO_MAX_PAGES);
                __attach_io_flag(fio);
                f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                                       fio->page->index, fio, GFP_NOIO);
                bio_set_op_attrs(bio, fio->op, fio->op_flags);

                add_bio_entry(fio->sbi, bio, page, fio->temp);
        } else {
                if (add_ipu_page(fio, &bio, page))
                        goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

        inc_page_count(fio->sbi, WB_DATA_TYPE(page));

        *fio->last_block = fio->new_blkaddr;
        *fio->bio = bio;

        return 0;
}

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
        struct page *bio_page;

        f2fs_bug_on(sbi, is_read_io(fio->op));

        down_write(&io->io_rwsem);
next:
        if (fio->in_list) {
                spin_lock(&io->io_lock);
                if (list_empty(&io->io_list)) {
                        spin_unlock(&io->io_lock);
                        goto out;
                }
                fio = list_first_entry(&io->io_list,
                                                struct f2fs_io_info, list);
                list_del(&fio->list);
                spin_unlock(&io->io_lock);
        }

        verify_fio_blkaddr(fio);

        if (fio->encrypted_page)
                bio_page = fio->encrypted_page;
        else if (fio->compressed_page)
                bio_page = fio->compressed_page;
        else
                bio_page = fio->page;

        /* set submitted = true as a return value */
        fio->submitted = true;

        inc_page_count(sbi, WB_DATA_TYPE(bio_page));

        if (io->bio &&
            (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
                              fio->new_blkaddr) ||
             !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio)))
                __submit_merged_bio(io);
alloc_new:
        if (io->bio == NULL) {
                if (F2FS_IO_ALIGNED(sbi) &&
                                (fio->type == DATA || fio->type == NODE) &&
                                fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
                        dec_page_count(sbi, WB_DATA_TYPE(bio_page));
                        fio->retry = true;
                        goto skip;
                }
                io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
                f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio, GFP_NOIO);
                io->fio = *fio;
        }

        if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

        io->last_block_in_bio = fio->new_blkaddr;
        f2fs_trace_ios(fio, 0);

        trace_f2fs_submit_page_write(fio->page, fio);
skip:
        if (fio->in_list)
                goto next;
out:
        if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
                                !f2fs_is_checkpoint_ready(sbi))
                __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

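/*
 * Pages at indices >= DIV_ROUND_UP(i_size, PAGE_SIZE) lie entirely beyond
 * i_size and are not covered by the file's Merkle tree, so fs-verity is
 * only required for indices below that bound.
 */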
static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
                                      unsigned nr_pages, unsigned op_flag,
                                      pgoff_t first_idx, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
        struct bio_post_read_ctx *ctx;
        unsigned int post_read_steps = 0;

        bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
                                                                for_write);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

        f2fs_target_device(sbi, blkaddr, bio);
        bio->bi_end_io = f2fs_read_end_io;
        bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

        if (fscrypt_inode_uses_fs_layer_crypto(inode))
                post_read_steps |= 1 << STEP_DECRYPT;
        if (f2fs_compressed_file(inode))
                post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
        if (f2fs_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                /* Due to the mempool, this never fails. */
                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
                ctx->bio = bio;
                ctx->sbi = sbi;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }

        return bio;
}

static void f2fs_release_read_bio(struct bio *bio)
{
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

/* This can handle encryption */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
                                 block_t blkaddr, int op_flags, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;

        bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
                                        page->index, for_write);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, blkaddr);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
        ClearPageError(page);
        inc_page_count(sbi, F2FS_RD_DATA);
        f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
        __submit_bio(sbi, bio, DATA);
        return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
        struct f2fs_node *rn = F2FS_NODE(dn->node_page);
        __le32 *addr_array;
        int base = 0;

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
        __set_data_blkaddr(dn);
        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
        dn->data_blkaddr = blkaddr;
        f2fs_set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        int err;

        if (!count)
                return 0;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;
        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

        trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
                                                dn->ofs_in_node, count);

        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

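        /*
         * Walk forward from dn->ofs_in_node, reserving NEW_ADDR in each
         * hole until @count holes have been filled; slots that already
         * have a block address are skipped and don't consume @count.
         */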
        for (; count > 0; dn->ofs_in_node++) {
                block_t blkaddr = f2fs_data_blkaddr(dn);
                if (blkaddr == NULL_ADDR) {
                        dn->data_blkaddr = NEW_ADDR;
                        __set_data_blkaddr(dn);
                        count--;
                }
        }

        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
        return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
        unsigned int ofs_in_node = dn->ofs_in_node;
        int ret;

        ret = f2fs_reserve_new_blocks(dn, 1);
        dn->ofs_in_node = ofs_in_node;
        return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
        bool need_put = dn->inode_page ? false : true;
        int err;

        err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
        if (err)
                return err;

        if (dn->data_blkaddr == NULL_ADDR)
                err = f2fs_reserve_new_block(dn);
        if (err || need_put)
                f2fs_put_dnode(dn);
        return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
        struct extent_info ei = {0, 0, 0};
        struct inode *inode = dn->inode;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn->data_blkaddr = ei.blk + index - ei.fofs;
                return 0;
        }

        return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
                                                int op_flags, bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, 0, 0};
        int err;

        page = f2fs_grab_cache_page(mapping, index, for_write);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ)) {
                        err = -EFSCORRUPTED;
                        goto put_err;
                }
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_err;
        f2fs_put_dnode(&dn);

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                err = -ENOENT;
                goto put_err;
        }
        if (dn.data_blkaddr != NEW_ADDR &&
                        !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                                dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE)) {
                err = -EFSCORRUPTED;
                goto put_err;
        }
got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        /*
         * A new dentry page is allocated but not able to be written, since its
         * new inode page couldn't be allocated due to -ENOSPC.
         * In such a case, its blkaddr remains NEW_ADDR.
         * See f2fs_add_link -> f2fs_get_new_data_page ->
         * f2fs_init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
                unlock_page(page);
                return page;
        }

        err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
                                                op_flags, for_write);
        if (err)
                goto put_err;
        return page;

put_err:
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        page = f2fs_get_read_data_page(inode, index, 0, false);
        if (IS_ERR(page))
                return page;

        if (PageUptodate(page))
                return page;

        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 0);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
                                                        bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
repeat:
        page = f2fs_get_read_data_page(inode, index, 0, for_write);
        if (IS_ERR(page))
                return page;

        /* wait for read completion */
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
                struct page *ipage, pgoff_t index, bool new_i_size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page) {
                /*
                 * before exiting, we should make sure ipage will be released
                 * if any error occurs.
                 */
                f2fs_put_page(ipage, 1);
                return ERR_PTR(-ENOMEM);
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, index);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        if (!ipage)
                f2fs_put_dnode(&dn);

        if (PageUptodate(page))
                goto got_it;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);

                /* if ipage exists, blkaddr should be NEW_ADDR */
                f2fs_bug_on(F2FS_I_SB(inode), ipage);
                page = f2fs_get_lock_data_page(inode, index, true);
                if (IS_ERR(page))
                        return page;
        }
got_it:
        if (new_i_size && i_size_read(inode) <
                                ((loff_t)(index + 1) << PAGE_SHIFT))
                f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
        return page;
}

static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        block_t old_blkaddr;
        blkcnt_t count = 1;
        int err;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;

        err = f2fs_get_node_info(sbi, dn->nid, &ni);
        if (err)
                return err;

        dn->data_blkaddr = f2fs_data_blkaddr(dn);
        if (dn->data_blkaddr != NULL_ADDR)
                goto alloc;

        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

alloc:
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
        old_blkaddr = dn->data_blkaddr;
        f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
                                        &sum, seg_type, NULL);
        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
                invalidate_mapping_pages(META_MAPPING(sbi),
                                        old_blkaddr, old_blkaddr);
        f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

        /*
         * i_size will be updated by direct_IO(). Otherwise, we'll get stale
         * data from an unwritten block via dio read.
         */
        return 0;
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        struct f2fs_map_blocks map;
        int flag;
        int err = 0;
        bool direct_io = iocb->ki_flags & IOCB_DIRECT;

        map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
        map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
        if (map.m_len > map.m_lblk)
                map.m_len -= map.m_lblk;
        else
                map.m_len = 0;
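        /*
         * Illustrative example (4KB blocks): ki_pos = 5000 and a 10000-byte
         * iov give m_lblk = F2FS_BLK_ALIGN(5000) = 2 and
         * F2FS_BYTES_TO_BLK(15000) = 3, so m_len = 1 block is preallocated;
         * the partial head and tail blocks are left to the write path.
         */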

        map.m_next_pgofs = NULL;
        map.m_next_extent = NULL;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = true;

        if (direct_io) {
                map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
                flag = f2fs_force_buffered_io(inode, iocb, from) ?
                                        F2FS_GET_BLOCK_PRE_AIO :
                                        F2FS_GET_BLOCK_PRE_DIO;
                goto map_blocks;
        }
        if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }
        if (f2fs_has_inline_data(inode))
                return err;

        flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
        err = f2fs_map_blocks(inode, &map, 1, flag);
        if (map.m_len > 0 && err == -ENOSPC) {
                if (!direct_io)
                        set_inode_flag(inode, FI_NO_PREALLOC);
                err = 0;
        }
        return err;
}

void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
                if (lock)
                        down_read(&sbi->node_change);
                else
                        up_read(&sbi->node_change);
        } else {
                if (lock)
                        f2fs_lock_op(sbi);
                else
                        f2fs_unlock_op(sbi);
        }
}

/*
 * f2fs_map_blocks() tries to find or build a mapping relationship which
 * maps contiguous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
 */
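/*
 * The @flag values used in this file include F2FS_GET_BLOCK_BMAP,
 * F2FS_GET_BLOCK_DIO, F2FS_GET_BLOCK_PRE_DIO and F2FS_GET_BLOCK_PRE_AIO;
 * they select the locking scheme (see f2fs_do_map_lock()) and how holes
 * are reported or preallocated.
 */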
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                                                int create, int flag)
{
        unsigned int maxblocks = map->m_len;
        struct dnode_of_data dn;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
        pgoff_t pgofs, end_offset, end;
        int err = 0, ofs = 1;
        unsigned int ofs_in_node, last_ofs_in_node;
        blkcnt_t prealloc;
        struct extent_info ei = {0, 0, 0};
        block_t blkaddr;
        unsigned int start_pgofs;

        if (!maxblocks)
                return 0;

        map->m_len = 0;
        map->m_flags = 0;

        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
        end = pgofs + maxblocks;

        if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
                                                        map->m_may_create)
                        goto next_dnode;

                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                if (map->m_next_extent)
                        *map->m_next_extent = pgofs + map->m_len;

1535                /* for hardware encryption, but also to avoid potential issues in the future */
1536                if (flag == F2FS_GET_BLOCK_DIO)
1537                        f2fs_wait_on_block_writeback_range(inode,
1538                                                map->m_pblk, map->m_len);
1539                goto out;
1540        }
1541
1542next_dnode:
1543        if (map->m_may_create)
1544                f2fs_do_map_lock(sbi, flag, true);
1545
1546        /* When reading holes, we still need the node page */
1547        set_new_dnode(&dn, inode, NULL, NULL, 0);
1548        err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1549        if (err) {
1550                if (flag == F2FS_GET_BLOCK_BMAP)
1551                        map->m_pblk = 0;
1552                if (err == -ENOENT) {
1553                        err = 0;
1554                        if (map->m_next_pgofs)
1555                                *map->m_next_pgofs =
1556                                        f2fs_get_next_page_offset(&dn, pgofs);
1557                        if (map->m_next_extent)
1558                                *map->m_next_extent =
1559                                        f2fs_get_next_page_offset(&dn, pgofs);
1560                }
1561                goto unlock_out;
1562        }
1563
1564        start_pgofs = pgofs;
1565        prealloc = 0;
1566        last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1567        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1568
1569next_block:
1570        blkaddr = f2fs_data_blkaddr(&dn);
1571
1572        if (__is_valid_data_blkaddr(blkaddr) &&
1573                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1574                err = -EFSCORRUPTED;
1575                goto sync_out;
1576        }
1577
1578        if (__is_valid_data_blkaddr(blkaddr)) {
1579                /* use out-of-place update for direct IO under LFS mode */
1580                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1581                                                        map->m_may_create) {
1582                        err = __allocate_data_block(&dn, map->m_seg_type);
1583                        if (err)
1584                                goto sync_out;
1585                        blkaddr = dn.data_blkaddr;
1586                        set_inode_flag(inode, FI_APPEND_WRITE);
1587                }
1588        } else {
1589                if (create) {
1590                        if (unlikely(f2fs_cp_error(sbi))) {
1591                                err = -EIO;
1592                                goto sync_out;
1593                        }
1594                        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1595                                if (blkaddr == NULL_ADDR) {
1596                                        prealloc++;
1597                                        last_ofs_in_node = dn.ofs_in_node;
1598                                }
1599                        } else {
1600                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1601                                        flag != F2FS_GET_BLOCK_DIO);
1602                                err = __allocate_data_block(&dn,
1603                                                        map->m_seg_type);
1604                                if (!err)
1605                                        set_inode_flag(inode, FI_APPEND_WRITE);
1606                        }
1607                        if (err)
1608                                goto sync_out;
1609                        map->m_flags |= F2FS_MAP_NEW;
1610                        blkaddr = dn.data_blkaddr;
1611                } else {
1612                        if (flag == F2FS_GET_BLOCK_BMAP) {
1613                                map->m_pblk = 0;
1614                                goto sync_out;
1615                        }
1616                        if (flag == F2FS_GET_BLOCK_PRECACHE)
1617                                goto sync_out;
1618                        if (flag == F2FS_GET_BLOCK_FIEMAP &&
1619                                                blkaddr == NULL_ADDR) {
1620                                if (map->m_next_pgofs)
1621                                        *map->m_next_pgofs = pgofs + 1;
1622                                goto sync_out;
1623                        }
1624                        if (flag != F2FS_GET_BLOCK_FIEMAP) {
1625                                /* for the defragment case */
1626                                if (map->m_next_pgofs)
1627                                        *map->m_next_pgofs = pgofs + 1;
1628                                goto sync_out;
1629                        }
1630                }
1631        }
1632
1633        if (flag == F2FS_GET_BLOCK_PRE_AIO)
1634                goto skip;
1635
1636        if (map->m_len == 0) {
1637                /* a preallocated unwritten block should be mapped for fiemap. */
1638                if (blkaddr == NEW_ADDR)
1639                        map->m_flags |= F2FS_MAP_UNWRITTEN;
1640                map->m_flags |= F2FS_MAP_MAPPED;
1641
1642                map->m_pblk = blkaddr;
1643                map->m_len = 1;
1644        } else if ((map->m_pblk != NEW_ADDR &&
1645                        blkaddr == (map->m_pblk + ofs)) ||
1646                        (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1647                        flag == F2FS_GET_BLOCK_PRE_DIO) {
1648                ofs++;
1649                map->m_len++;
1650        } else {
1651                goto sync_out;
1652        }
1653
1654skip:
1655        dn.ofs_in_node++;
1656        pgofs++;
1657
1658        /* preallocate blocks in batch for one dnode page */
1659        if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1660                        (pgofs == end || dn.ofs_in_node == end_offset)) {
1661
1662                dn.ofs_in_node = ofs_in_node;
1663                err = f2fs_reserve_new_blocks(&dn, prealloc);
1664                if (err)
1665                        goto sync_out;
1666
1667                map->m_len += dn.ofs_in_node - ofs_in_node;
1668                if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1669                        err = -ENOSPC;
1670                        goto sync_out;
1671                }
1672                dn.ofs_in_node = end_offset;
1673        }
1674
1675        if (pgofs >= end)
1676                goto sync_out;
1677        else if (dn.ofs_in_node < end_offset)
1678                goto next_block;
1679
1680        if (flag == F2FS_GET_BLOCK_PRECACHE) {
1681                if (map->m_flags & F2FS_MAP_MAPPED) {
1682                        unsigned int ofs = start_pgofs - map->m_lblk;
1683
1684                        f2fs_update_extent_cache_range(&dn,
1685                                start_pgofs, map->m_pblk + ofs,
1686                                map->m_len - ofs);
1687                }
1688        }
1689
1690        f2fs_put_dnode(&dn);
1691
1692        if (map->m_may_create) {
1693                f2fs_do_map_lock(sbi, flag, false);
1694                f2fs_balance_fs(sbi, dn.node_changed);
1695        }
1696        goto next_dnode;
1697
1698sync_out:
1699
1700        /* for hardware encryption, but also to avoid potential issues in the future */
1701        if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1702                f2fs_wait_on_block_writeback_range(inode,
1703                                                map->m_pblk, map->m_len);
1704
1705        if (flag == F2FS_GET_BLOCK_PRECACHE) {
1706                if (map->m_flags & F2FS_MAP_MAPPED) {
1707                        unsigned int ofs = start_pgofs - map->m_lblk;
1708
1709                        f2fs_update_extent_cache_range(&dn,
1710                                start_pgofs, map->m_pblk + ofs,
1711                                map->m_len - ofs);
1712                }
1713                if (map->m_next_extent)
1714                        *map->m_next_extent = pgofs + 1;
1715        }
1716        f2fs_put_dnode(&dn);
1717unlock_out:
1718        if (map->m_may_create) {
1719                f2fs_do_map_lock(sbi, flag, false);
1720                f2fs_balance_fs(sbi, dn.node_changed);
1721        }
1722out:
1723        trace_f2fs_map_blocks(inode, map, err);
1724        return err;
1725}
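
/*
 * Calling convention for f2fs_map_blocks(): the caller fills in m_lblk
 * and m_len (plus m_seg_type and m_may_create); on success, m_pblk,
 * m_len and m_flags describe one mapped or preallocated run.  Callers
 * that need a larger range loop by advancing m_lblk += m_len, exactly
 * as f2fs_overwrite_io() below does.
 */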
1726
1727bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1728{
1729        struct f2fs_map_blocks map;
1730        block_t last_lblk;
1731        int err;
1732
1733        if (pos + len > i_size_read(inode))
1734                return false;
1735
1736        map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1737        map.m_next_pgofs = NULL;
1738        map.m_next_extent = NULL;
1739        map.m_seg_type = NO_CHECK_TYPE;
1740        map.m_may_create = false;
1741        last_lblk = F2FS_BLK_ALIGN(pos + len);
1742
1743        while (map.m_lblk < last_lblk) {
1744                map.m_len = last_lblk - map.m_lblk;
1745                err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1746                if (err || map.m_len == 0)
1747                        return false;
1748                map.m_lblk += map.m_len;
1749        }
1750        return true;
1751}
1752
1753static int __get_data_block(struct inode *inode, sector_t iblock,
1754                        struct buffer_head *bh, int create, int flag,
1755                        pgoff_t *next_pgofs, int seg_type, bool may_write)
1756{
1757        struct f2fs_map_blocks map;
1758        int err;
1759
1760        map.m_lblk = iblock;
1761        map.m_len = bh->b_size >> inode->i_blkbits;
1762        map.m_next_pgofs = next_pgofs;
1763        map.m_next_extent = NULL;
1764        map.m_seg_type = seg_type;
1765        map.m_may_create = may_write;
1766
1767        err = f2fs_map_blocks(inode, &map, create, flag);
1768        if (!err) {
1769                map_bh(bh, inode->i_sb, map.m_pblk);
1770                bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1771                bh->b_size = (u64)map.m_len << inode->i_blkbits;
1772        }
1773        return err;
1774}
1775
1776static int get_data_block(struct inode *inode, sector_t iblock,
1777                        struct buffer_head *bh_result, int create, int flag,
1778                        pgoff_t *next_pgofs)
1779{
1780        return __get_data_block(inode, iblock, bh_result, create,
1781                                                        flag, next_pgofs,
1782                                                        NO_CHECK_TYPE, create);
1783}
1784
1785static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1786                        struct buffer_head *bh_result, int create)
1787{
1788        return __get_data_block(inode, iblock, bh_result, create,
1789                                F2FS_GET_BLOCK_DIO, NULL,
1790                                f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1791                                IS_SWAPFILE(inode) ? false : true);
1792}
1793
1794static int get_data_block_dio(struct inode *inode, sector_t iblock,
1795                        struct buffer_head *bh_result, int create)
1796{
1797        return __get_data_block(inode, iblock, bh_result, create,
1798                                F2FS_GET_BLOCK_DIO, NULL,
1799                                f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1800                                false);
1801}
1802
1803static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1804                        struct buffer_head *bh_result, int create)
1805{
1806        /* the block number must be less than max_file_blocks */
1807        if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1808                return -EFBIG;
1809
1810        return __get_data_block(inode, iblock, bh_result, create,
1811                                                F2FS_GET_BLOCK_BMAP, NULL,
1812                                                NO_CHECK_TYPE, create);
1813}
1814
1815static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1816{
1817        return (offset >> inode->i_blkbits);
1818}
1819
1820static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1821{
1822        return (blk << inode->i_blkbits);
1823}
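
/*
 * These two helpers are plain byte/block conversions.  For example,
 * assuming the default 4KB block size (i_blkbits == 12):
 *
 *      logical_to_blk(inode, 8192) == 2
 *      logical_to_blk(inode, 8193) == 2        (rounds down)
 *      blk_to_logical(inode, 2)    == 8192
 */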
1824
1825static int f2fs_xattr_fiemap(struct inode *inode,
1826                                struct fiemap_extent_info *fieinfo)
1827{
1828        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1829        struct page *page;
1830        struct node_info ni;
1831        __u64 phys = 0, len;
1832        __u32 flags;
1833        nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1834        int err = 0;
1835
1836        if (f2fs_has_inline_xattr(inode)) {
1837                int offset;
1838
1839                page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1840                                                inode->i_ino, false);
1841                if (!page)
1842                        return -ENOMEM;
1843
1844                err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1845                if (err) {
1846                        f2fs_put_page(page, 1);
1847                        return err;
1848                }
1849
1850                phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1851                offset = offsetof(struct f2fs_inode, i_addr) +
1852                                        sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1853                                        get_inline_xattr_addrs(inode));
1854
1855                phys += offset;
1856                len = inline_xattr_size(inode);
1857
1858                f2fs_put_page(page, 1);
1859
1860                flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1861
1862                if (!xnid)
1863                        flags |= FIEMAP_EXTENT_LAST;
1864
1865                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1866                trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1867                if (err || err == 1)
1868                        return err;
1869        }
1870
1871        if (xnid) {
1872                page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1873                if (!page)
1874                        return -ENOMEM;
1875
1876                err = f2fs_get_node_info(sbi, xnid, &ni);
1877                if (err) {
1878                        f2fs_put_page(page, 1);
1879                        return err;
1880                }
1881
1882                phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1883                len = inode->i_sb->s_blocksize;
1884
1885                f2fs_put_page(page, 1);
1886
1887                flags = FIEMAP_EXTENT_LAST;
1888        }
1889
1890        if (phys) {
1891                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1892                trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1893        }
1894
1895        return (err < 0 ? err : 0);
1896}
1897
1898static loff_t max_inode_blocks(struct inode *inode)
1899{
1900        loff_t result = ADDRS_PER_INODE(inode);
1901        loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1902
1903        /* two direct node blocks */
1904        result += (leaf_count * 2);
1905
1906        /* two indirect node blocks */
1907        leaf_count *= NIDS_PER_BLOCK;
1908        result += (leaf_count * 2);
1909
1910        /* one double indirect node block */
1911        leaf_count *= NIDS_PER_BLOCK;
1912        result += leaf_count;
1913
1914        return result;
1915}
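
/*
 * A worked example, assuming the default 4KB block geometry
 * (ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *      923 + 2*1018 + 2*1018^2 + 1018^3 = 1,057,053,439 blocks,
 *
 * which is roughly 3.94 TiB of addressable file data.
 */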
1916
1917int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1918                u64 start, u64 len)
1919{
1920        struct buffer_head map_bh;
1921        sector_t start_blk, last_blk;
1922        pgoff_t next_pgofs;
1923        u64 logical = 0, phys = 0, size = 0;
1924        u32 flags = 0;
1925        int ret = 0;
1926        bool compr_cluster = false;
1927        unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1928
1929        if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1930                ret = f2fs_precache_extents(inode);
1931                if (ret)
1932                        return ret;
1933        }
1934
1935        ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1936        if (ret)
1937                return ret;
1938
1939        inode_lock(inode);
1940
1941        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1942                ret = f2fs_xattr_fiemap(inode, fieinfo);
1943                goto out;
1944        }
1945
1946        if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1947                ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1948                if (ret != -EAGAIN)
1949                        goto out;
1950        }
1951
1952        if (logical_to_blk(inode, len) == 0)
1953                len = blk_to_logical(inode, 1);
1954
1955        start_blk = logical_to_blk(inode, start);
1956        last_blk = logical_to_blk(inode, start + len - 1);
1957
1958next:
1959        memset(&map_bh, 0, sizeof(struct buffer_head));
1960        map_bh.b_size = len;
1961
1962        if (compr_cluster)
1963                map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
1964
1965        ret = get_data_block(inode, start_blk, &map_bh, 0,
1966                                        F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1967        if (ret)
1968                goto out;
1969
1970        /* HOLE */
1971        if (!buffer_mapped(&map_bh)) {
1972                start_blk = next_pgofs;
1973
1974                if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1975                                                max_inode_blocks(inode)))
1976                        goto prep_next;
1977
1978                flags |= FIEMAP_EXTENT_LAST;
1979        }
1980
1981        if (size) {
1982                if (IS_ENCRYPTED(inode))
1983                        flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1984
1985                ret = fiemap_fill_next_extent(fieinfo, logical,
1986                                phys, size, flags);
1987                trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1988                if (ret)
1989                        goto out;
1990                size = 0;
1991        }
1992
1993        if (start_blk > last_blk)
1994                goto out;
1995
1996        if (compr_cluster) {
1997                compr_cluster = false;
1998
1999
2000                logical = blk_to_logical(inode, start_blk - 1);
2001                phys = blk_to_logical(inode, map_bh.b_blocknr);
2002                size = blk_to_logical(inode, cluster_size);
2003
2004                flags |= FIEMAP_EXTENT_ENCODED;
2005
2006                start_blk += cluster_size - 1;
2007
2008                if (start_blk > last_blk)
2009                        goto out;
2010
2011                goto prep_next;
2012        }
2013
2014        if (map_bh.b_blocknr == COMPRESS_ADDR) {
2015                compr_cluster = true;
2016                start_blk++;
2017                goto prep_next;
2018        }
2019
2020        logical = blk_to_logical(inode, start_blk);
2021        phys = blk_to_logical(inode, map_bh.b_blocknr);
2022        size = map_bh.b_size;
2023        flags = 0;
2024        if (buffer_unwritten(&map_bh))
2025                flags = FIEMAP_EXTENT_UNWRITTEN;
2026
2027        start_blk += logical_to_blk(inode, size);
2028
2029prep_next:
2030        cond_resched();
2031        if (fatal_signal_pending(current))
2032                ret = -EINTR;
2033        else
2034                goto next;
2035out:
2036        if (ret == 1)
2037                ret = 0;
2038
2039        inode_unlock(inode);
2040        return ret;
2041}
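
/*
 * An illustrative userspace sketch of how f2fs_fiemap() above is
 * reached: the standard FS_IOC_FIEMAP ioctl from <linux/fiemap.h>.
 * The helper below is an example only and is not built as part of
 * this file.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static void dump_extents(const char *path)
{
        unsigned int i, n = 32;
        /* room for up to n extents in a single call */
        struct fiemap *fm = calloc(1, sizeof(*fm) +
                                   n * sizeof(struct fiemap_extent));
        int fd = open(path, O_RDONLY);

        fm->fm_start = 0;
        fm->fm_length = FIEMAP_MAX_OFFSET;      /* map the whole file */
        fm->fm_extent_count = n;

        if (fd >= 0 && ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
                for (i = 0; i < fm->fm_mapped_extents; i++)
                        printf("logical %llu phys %llu len %llu flags %#x\n",
                               (unsigned long long)fm->fm_extents[i].fe_logical,
                               (unsigned long long)fm->fm_extents[i].fe_physical,
                               (unsigned long long)fm->fm_extents[i].fe_length,
                               fm->fm_extents[i].fe_flags);
        free(fm);
        if (fd >= 0)
                close(fd);
}
#endif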
2042
2043static inline loff_t f2fs_readpage_limit(struct inode *inode)
2044{
2045        if (IS_ENABLED(CONFIG_FS_VERITY) &&
2046            (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2047                return inode->i_sb->s_maxbytes;
2048
2049        return i_size_read(inode);
2050}
2051
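/*
 * Read one page: map it with f2fs_map_blocks() (reusing *map across
 * calls so readahead avoids remapping the same run), zero it out if it
 * sits in a hole or beyond EOF, and otherwise add it to *bio_ret,
 * submitting and reallocating the bio whenever the next block cannot be
 * merged with the previous one.
 */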
2052static int f2fs_read_single_page(struct inode *inode, struct page *page,
2053                                        unsigned nr_pages,
2054                                        struct f2fs_map_blocks *map,
2055                                        struct bio **bio_ret,
2056                                        sector_t *last_block_in_bio,
2057                                        bool is_readahead)
2058{
2059        struct bio *bio = *bio_ret;
2060        const unsigned blkbits = inode->i_blkbits;
2061        const unsigned blocksize = 1 << blkbits;
2062        sector_t block_in_file;
2063        sector_t last_block;
2064        sector_t last_block_in_file;
2065        sector_t block_nr;
2066        int ret = 0;
2067
2068        block_in_file = (sector_t)page_index(page);
2069        last_block = block_in_file + nr_pages;
2070        last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
2071                                                        blkbits;
2072        if (last_block > last_block_in_file)
2073                last_block = last_block_in_file;
2074
2075        /* just zero out the page which is beyond EOF */
2076        if (block_in_file >= last_block)
2077                goto zero_out;
2078        /*
2079         * Map blocks using the previous result first.
2080         */
2081        if ((map->m_flags & F2FS_MAP_MAPPED) &&
2082                        block_in_file > map->m_lblk &&
2083                        block_in_file < (map->m_lblk + map->m_len))
2084                goto got_it;
2085
2086        /*
2087         * Then do more f2fs_map_blocks() calls until we are
2088         * done with this page.
2089         */
2090        map->m_lblk = block_in_file;
2091        map->m_len = last_block - block_in_file;
2092
2093        ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2094        if (ret)
2095                goto out;
2096got_it:
2097        if ((map->m_flags & F2FS_MAP_MAPPED)) {
2098                block_nr = map->m_pblk + block_in_file - map->m_lblk;
2099                SetPageMappedToDisk(page);
2100
2101                if (!PageUptodate(page) && (!PageSwapCache(page) &&
2102                                        !cleancache_get_page(page))) {
2103                        SetPageUptodate(page);
2104                        goto confused;
2105                }
2106
2107                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2108                                                DATA_GENERIC_ENHANCE_READ)) {
2109                        ret = -EFSCORRUPTED;
2110                        goto out;
2111                }
2112        } else {
2113zero_out:
2114                zero_user_segment(page, 0, PAGE_SIZE);
2115                if (f2fs_need_verity(inode, page->index) &&
2116                    !fsverity_verify_page(page)) {
2117                        ret = -EIO;
2118                        goto out;
2119                }
2120                if (!PageUptodate(page))
2121                        SetPageUptodate(page);
2122                unlock_page(page);
2123                goto out;
2124        }
2125
2126        /*
2127         * This page will go to BIO.  Do we need to send this
2128         * BIO off first?
2129         */
2130        if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2131                                       *last_block_in_bio, block_nr) ||
2132                    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2133submit_and_realloc:
2134                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2135                bio = NULL;
2136        }
2137        if (bio == NULL) {
2138                bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2139                                is_readahead ? REQ_RAHEAD : 0, page->index,
2140                                false);
2141                if (IS_ERR(bio)) {
2142                        ret = PTR_ERR(bio);
2143                        bio = NULL;
2144                        goto out;
2145                }
2146        }
2147
2148        /*
2149         * If the page is under writeback, we need to wait for
2150         * its completion to see the correct decrypted data.
2151         */
2152        f2fs_wait_on_block_writeback(inode, block_nr);
2153
2154        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2155                goto submit_and_realloc;
2156
2157        inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2158        f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2159        ClearPageError(page);
2160        *last_block_in_bio = block_nr;
2161        goto out;
2162confused:
2163        if (bio) {
2164                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2165                bio = NULL;
2166        }
2167        unlock_page(page);
2168out:
2169        *bio_ret = bio;
2170        return ret;
2171}
2172
2173#ifdef CONFIG_F2FS_FS_COMPRESSION
2174int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2175                                unsigned nr_pages, sector_t *last_block_in_bio,
2176                                bool is_readahead, bool for_write)
2177{
2178        struct dnode_of_data dn;
2179        struct inode *inode = cc->inode;
2180        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2181        struct bio *bio = *bio_ret;
2182        unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2183        sector_t last_block_in_file;
2184        const unsigned blkbits = inode->i_blkbits;
2185        const unsigned blocksize = 1 << blkbits;
2186        struct decompress_io_ctx *dic = NULL;
2187        int i;
2188        int ret = 0;
2189
2190        f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2191
2192        last_block_in_file = (f2fs_readpage_limit(inode) +
2193                                        blocksize - 1) >> blkbits;
2194
2195        /* get rid of pages beyond EOF */
2196        for (i = 0; i < cc->cluster_size; i++) {
2197                struct page *page = cc->rpages[i];
2198
2199                if (!page)
2200                        continue;
2201                if ((sector_t)page->index >= last_block_in_file) {
2202                        zero_user_segment(page, 0, PAGE_SIZE);
2203                        if (!PageUptodate(page))
2204                                SetPageUptodate(page);
2205                } else if (!PageUptodate(page)) {
2206                        continue;
2207                }
2208                unlock_page(page);
2209                cc->rpages[i] = NULL;
2210                cc->nr_rpages--;
2211        }
2212
2213        /* we are done since all pages are beyond EOF */
2214        if (f2fs_cluster_is_empty(cc))
2215                goto out;
2216
2217        set_new_dnode(&dn, inode, NULL, NULL, 0);
2218        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2219        if (ret)
2220                goto out;
2221
2222        f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2223
2224        for (i = 1; i < cc->cluster_size; i++) {
2225                block_t blkaddr;
2226
2227                blkaddr = data_blkaddr(dn.inode, dn.node_page,
2228                                                dn.ofs_in_node + i);
2229
2230                if (!__is_valid_data_blkaddr(blkaddr))
2231                        break;
2232
2233                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2234                        ret = -EFAULT;
2235                        goto out_put_dnode;
2236                }
2237                cc->nr_cpages++;
2238        }
2239
2240        /* nothing to decompress */
2241        if (cc->nr_cpages == 0) {
2242                ret = 0;
2243                goto out_put_dnode;
2244        }
2245
2246        dic = f2fs_alloc_dic(cc);
2247        if (IS_ERR(dic)) {
2248                ret = PTR_ERR(dic);
2249                goto out_put_dnode;
2250        }
2251
2252        for (i = 0; i < dic->nr_cpages; i++) {
2253                struct page *page = dic->cpages[i];
2254                block_t blkaddr;
2255                struct bio_post_read_ctx *ctx;
2256
2257                blkaddr = data_blkaddr(dn.inode, dn.node_page,
2258                                                dn.ofs_in_node + i + 1);
2259
2260                if (bio && (!page_is_mergeable(sbi, bio,
2261                                        *last_block_in_bio, blkaddr) ||
2262                    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2263submit_and_realloc:
2264                        __submit_bio(sbi, bio, DATA);
2265                        bio = NULL;
2266                }
2267
2268                if (!bio) {
2269                        bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2270                                        is_readahead ? REQ_RAHEAD : 0,
2271                                        page->index, for_write);
2272                        if (IS_ERR(bio)) {
2273                                ret = PTR_ERR(bio);
2274                                dic->failed = true;
2275                                if (refcount_sub_and_test(dic->nr_cpages - i,
2276                                                        &dic->ref)) {
2277                                        f2fs_decompress_end_io(dic->rpages,
2278                                                        cc->cluster_size, true,
2279                                                        false);
2280                                        f2fs_free_dic(dic);
2281                                }
2282                                f2fs_put_dnode(&dn);
2283                                *bio_ret = NULL;
2284                                return ret;
2285                        }
2286                }
2287
2288                f2fs_wait_on_block_writeback(inode, blkaddr);
2289
2290                if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2291                        goto submit_and_realloc;
2292
2293                /* tag STEP_DECOMPRESS to handle the IO in a workqueue */
2294                ctx = bio->bi_private;
2295                if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
2296                        ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
2297
2298                inc_page_count(sbi, F2FS_RD_DATA);
2299                f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2300                f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2301                ClearPageError(page);
2302                *last_block_in_bio = blkaddr;
2303        }
2304
2305        f2fs_put_dnode(&dn);
2306
2307        *bio_ret = bio;
2308        return 0;
2309
2310out_put_dnode:
2311        f2fs_put_dnode(&dn);
2312out:
2313        f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
2314        *bio_ret = bio;
2315        return ret;
2316}
2317#endif
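
/*
 * In the compressed-read path above, the first slot of a cluster holds
 * COMPRESS_ADDR and the following slots hold the compressed blocks
 * (cpages).  The bio is tagged with STEP_DECOMPRESS so that the actual
 * decompression into rpages happens later in the post-read workqueue.
 */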
2318
2319/*
2320 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2321 * The major change is that block_size == page_size in f2fs by default.
2322 *
2323 * Note that the aops->readpages() function is ONLY used for read-ahead. If
2324 * this function ever deviates from doing just read-ahead, it should either
2325 * use ->readpage() or do the necessary surgery to decouple ->readpages()
2326 * from read-ahead.
2327 */
2328static int f2fs_mpage_readpages(struct inode *inode,
2329                struct readahead_control *rac, struct page *page)
2330{
2331        struct bio *bio = NULL;
2332        sector_t last_block_in_bio = 0;
2333        struct f2fs_map_blocks map;
2334#ifdef CONFIG_F2FS_FS_COMPRESSION
2335        struct compress_ctx cc = {
2336                .inode = inode,
2337                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2338                .cluster_size = F2FS_I(inode)->i_cluster_size,
2339                .cluster_idx = NULL_CLUSTER,
2340                .rpages = NULL,
2341                .cpages = NULL,
2342                .nr_rpages = 0,
2343                .nr_cpages = 0,
2344        };
2345#endif
2346        unsigned nr_pages = rac ? readahead_count(rac) : 1;
2347        unsigned max_nr_pages = nr_pages;
2348        int ret = 0;
2349        bool drop_ra = false;
2350
2351        map.m_pblk = 0;
2352        map.m_lblk = 0;
2353        map.m_len = 0;
2354        map.m_flags = 0;
2355        map.m_next_pgofs = NULL;
2356        map.m_next_extent = NULL;
2357        map.m_seg_type = NO_CHECK_TYPE;
2358        map.m_may_create = false;
2359
2360        /*
2361         * Two readahead threads on the same address range can race and
2362         * fragment sequential read IOs, so let them avoid each other.
2363         */
2364        if (rac && readahead_count(rac)) {
2365                if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
2366                        drop_ra = true;
2367                else
2368                        WRITE_ONCE(F2FS_I(inode)->ra_offset,
2369                                                readahead_index(rac));
2370        }
2371
2372        for (; nr_pages; nr_pages--) {
2373                if (rac) {
2374                        page = readahead_page(rac);
2375                        prefetchw(&page->flags);
2376                        if (drop_ra) {
2377                                f2fs_put_page(page, 1);
2378                                continue;
2379                        }
2380                }
2381
2382#ifdef CONFIG_F2FS_FS_COMPRESSION
2383                if (f2fs_compressed_file(inode)) {
2384                        /* there are remaining compressed pages, submit them */
2385                        if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2386                                ret = f2fs_read_multi_pages(&cc, &bio,
2387                                                        max_nr_pages,
2388                                                        &last_block_in_bio,
2389                                                        rac != NULL, false);
2390                                f2fs_destroy_compress_ctx(&cc);
2391                                if (ret)
2392                                        goto set_error_page;
2393                        }
2394                        ret = f2fs_is_compressed_cluster(inode, page->index);
2395                        if (ret < 0)
2396                                goto set_error_page;
2397                        else if (!ret)
2398                                goto read_single_page;
2399
2400                        ret = f2fs_init_compress_ctx(&cc);
2401                        if (ret)
2402                                goto set_error_page;
2403
2404                        f2fs_compress_ctx_add_page(&cc, page);
2405
2406                        goto next_page;
2407                }
2408read_single_page:
2409#endif
2410
2411                ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2412                                        &bio, &last_block_in_bio, rac);
2413                if (ret) {
2414#ifdef CONFIG_F2FS_FS_COMPRESSION
2415set_error_page:
2416#endif
2417                        SetPageError(page);
2418                        zero_user_segment(page, 0, PAGE_SIZE);
2419                        unlock_page(page);
2420                }
2421#ifdef CONFIG_F2FS_FS_COMPRESSION
2422next_page:
2423#endif
2424                if (rac)
2425                        put_page(page);
2426
2427#ifdef CONFIG_F2FS_FS_COMPRESSION
2428                if (f2fs_compressed_file(inode)) {
2429                        /* last page */
2430                        if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2431                                ret = f2fs_read_multi_pages(&cc, &bio,
2432                                                        max_nr_pages,
2433                                                        &last_block_in_bio,
2434                                                        rac != NULL, false);
2435                                f2fs_destroy_compress_ctx(&cc);
2436                        }
2437                }
2438#endif
2439        }
2440        if (bio)
2441                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2442
2443        if (rac && readahead_count(rac) && !drop_ra)
2444                WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
2445        return ret;
2446}
2447
2448static int f2fs_read_data_page(struct file *file, struct page *page)
2449{
2450        struct inode *inode = page_file_mapping(page)->host;
2451        int ret = -EAGAIN;
2452
2453        trace_f2fs_readpage(page, DATA);
2454
2455        if (!f2fs_is_compress_backend_ready(inode)) {
2456                unlock_page(page);
2457                return -EOPNOTSUPP;
2458        }
2459
2460        /* If the file has inline data, try to read it directly */
2461        if (f2fs_has_inline_data(inode))
2462                ret = f2fs_read_inline_data(inode, page);
2463        if (ret == -EAGAIN)
2464                ret = f2fs_mpage_readpages(inode, NULL, page);
2465        return ret;
2466}
2467
2468static void f2fs_readahead(struct readahead_control *rac)
2469{
2470        struct inode *inode = rac->mapping->host;
2471
2472        trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2473
2474        if (!f2fs_is_compress_backend_ready(inode))
2475                return;
2476
2477        /* If the file has inline data, skip readpages */
2478        if (f2fs_has_inline_data(inode))
2479                return;
2480
2481        f2fs_mpage_readpages(inode, rac, NULL);
2482}
2483
2484int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2485{
2486        struct inode *inode = fio->page->mapping->host;
2487        struct page *mpage, *page;
2488        gfp_t gfp_flags = GFP_NOFS;
2489
2490        if (!f2fs_encrypted_file(inode))
2491                return 0;
2492
2493        page = fio->compressed_page ? fio->compressed_page : fio->page;
2494
2495        /* wait for GCed page writeback via META_MAPPING */
2496        f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2497
2498        if (fscrypt_inode_uses_inline_crypto(inode))
2499                return 0;
2500
2501retry_encrypt:
2502        fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2503                                        PAGE_SIZE, 0, gfp_flags);
2504        if (IS_ERR(fio->encrypted_page)) {
2505                /* flush pending IOs and wait for a while in the ENOMEM case */
2506                if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2507                        f2fs_flush_merged_writes(fio->sbi);
2508                        congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2509                        gfp_flags |= __GFP_NOFAIL;
2510                        goto retry_encrypt;
2511                }
2512                return PTR_ERR(fio->encrypted_page);
2513        }
2514
2515        mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2516        if (mpage) {
2517                if (PageUptodate(mpage))
2518                        memcpy(page_address(mpage),
2519                                page_address(fio->encrypted_page), PAGE_SIZE);
2520                f2fs_put_page(mpage, 1);
2521        }
2522        return 0;
2523}
2524
2525static inline bool check_inplace_update_policy(struct inode *inode,
2526                                struct f2fs_io_info *fio)
2527{
2528        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2529        unsigned int policy = SM_I(sbi)->ipu_policy;
2530
2531        if (policy & (0x1 << F2FS_IPU_FORCE))
2532                return true;
2533        if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2534                return true;
2535        if (policy & (0x1 << F2FS_IPU_UTIL) &&
2536                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
2537                return true;
2538        if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2539                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
2540                return true;
2541
2542        /*
2543         * IPU for rewriting async pages
2544         */
2545        if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2546                        fio && fio->op == REQ_OP_WRITE &&
2547                        !(fio->op_flags & REQ_SYNC) &&
2548                        !IS_ENCRYPTED(inode))
2549                return true;
2550
2551        /* this is only set during fdatasync */
2552        if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2553                        is_inode_flag_set(inode, FI_NEED_IPU))
2554                return true;
2555
2556        if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2557                        !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2558                return true;
2559
2560        return false;
2561}
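
/*
 * Note: ipu_policy is a bitmask whose bit positions follow the
 * F2FS_IPU_* enum (FORCE, SSR, UTIL, SSR_UTIL, ASYNC, FSYNC, ...).
 * It is normally tuned through sysfs; for instance (an illustration):
 *
 *      echo 4 > /sys/fs/f2fs/<dev>/ipu_policy    enables F2FS_IPU_UTIL
 */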
2562
2563bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2564{
2565        if (f2fs_is_pinned_file(inode))
2566                return true;
2567
2568        /* if this is a cold file, we should overwrite to avoid fragmentation */
2569        if (file_is_cold(inode))
2570                return true;
2571
2572        return check_inplace_update_policy(inode, fio);
2573}
2574
2575bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2576{
2577        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2578
2579        if (f2fs_lfs_mode(sbi))
2580                return true;
2581        if (S_ISDIR(inode->i_mode))
2582                return true;
2583        if (IS_NOQUOTA(inode))
2584                return true;
2585        if (f2fs_is_atomic_file(inode))
2586                return true;
2587        if (fio) {
2588                if (is_cold_data(fio->page))
2589                        return true;
2590                if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2591                        return true;
2592                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2593                        f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2594                        return true;
2595        }
2596        return false;
2597}
2598
2599static inline bool need_inplace_update(struct f2fs_io_info *fio)
2600{
2601        struct inode *inode = fio->page->mapping->host;
2602
2603        if (f2fs_should_update_outplace(inode, fio))
2604                return false;
2605
2606        return f2fs_should_update_inplace(inode, fio);
2607}
2608
2609int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2610{
2611        struct page *page = fio->page;
2612        struct inode *inode = page->mapping->host;
2613        struct dnode_of_data dn;
2614        struct extent_info ei = {0, 0, 0};
2615        struct node_info ni;
2616        bool ipu_force = false;
2617        int err = 0;
2618
2619        set_new_dnode(&dn, inode, NULL, NULL, 0);
2620        if (need_inplace_update(fio) &&
2621                        f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2622                fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2623
2624                if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2625                                                DATA_GENERIC_ENHANCE))
2626                        return -EFSCORRUPTED;
2627
2628                ipu_force = true;
2629                fio->need_lock = LOCK_DONE;
2630                goto got_it;
2631        }
2632
2633        /* avoid a deadlock between page->lock and f2fs_lock_op */
2634        if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2635                return -EAGAIN;
2636
2637        err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2638        if (err)
2639                goto out;
2640
2641        fio->old_blkaddr = dn.data_blkaddr;
2642
2643        /* This page is already truncated */
2644        if (fio->old_blkaddr == NULL_ADDR) {
2645                ClearPageUptodate(page);
2646                clear_cold_data(page);
2647                goto out_writepage;
2648        }
2649got_it:
2650        if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2651                !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2652                                                DATA_GENERIC_ENHANCE)) {
2653                err = -EFSCORRUPTED;
2654                goto out_writepage;
2655        }
2656        /*
2657         * If the current allocation needs SSR,
2658         * it had better use in-place writes for updated data.
2659         */
2660        if (ipu_force ||
2661                (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2662                                        need_inplace_update(fio))) {
2663                err = f2fs_encrypt_one_page(fio);
2664                if (err)
2665                        goto out_writepage;
2666
2667                set_page_writeback(page);
2668                ClearPageError(page);
2669                f2fs_put_dnode(&dn);
2670                if (fio->need_lock == LOCK_REQ)
2671                        f2fs_unlock_op(fio->sbi);
2672                err = f2fs_inplace_write_data(fio);
2673                if (err) {
2674                        if (fscrypt_inode_uses_fs_layer_crypto(inode))
2675                                fscrypt_finalize_bounce_page(&fio->encrypted_page);
2676                        if (PageWriteback(page))
2677                                end_page_writeback(page);
2678                } else {
2679                        set_inode_flag(inode, FI_UPDATE_WRITE);
2680                }
2681                trace_f2fs_do_write_data_page(fio->page, IPU);
2682                return err;
2683        }
2684
2685        if (fio->need_lock == LOCK_RETRY) {
2686                if (!f2fs_trylock_op(fio->sbi)) {
2687                        err = -EAGAIN;
2688                        goto out_writepage;
2689                }
2690                fio->need_lock = LOCK_REQ;
2691        }
2692
2693        err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2694        if (err)
2695                goto out_writepage;
2696
2697        fio->version = ni.version;
2698
2699        err = f2fs_encrypt_one_page(fio);
2700        if (err)
2701                goto out_writepage;
2702
2703        set_page_writeback(page);
2704        ClearPageError(page);
2705
2706        if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2707                f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2708
2709        /* LFS mode write path */
2710        f2fs_outplace_write_data(&dn, fio);
2711        trace_f2fs_do_write_data_page(page, OPU);
2712        set_inode_flag(inode, FI_APPEND_WRITE);
2713        if (page->index == 0)
2714                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2715out_writepage:
2716        f2fs_put_dnode(&dn);
2717out:
2718        if (fio->need_lock == LOCK_REQ)
2719                f2fs_unlock_op(fio->sbi);
2720        return err;
2721}
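
/*
 * To summarize the two write paths above: when need_inplace_update()
 * holds (or the extent cache lookup set ipu_force), the block is
 * rewritten at fio->old_blkaddr via f2fs_inplace_write_data() and traced
 * as IPU; otherwise f2fs_outplace_write_data() allocates a new block,
 * which is the normal LFS-style (OPU) path.
 */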
2722
2723int f2fs_write_single_data_page(struct page *page, int *submitted,
2724                                struct bio **bio,
2725                                sector_t *last_block,
2726                                struct writeback_control *wbc,
2727                                enum iostat_type io_type,
2728                                int compr_blocks)
2729{
2730        struct inode *inode = page->mapping->host;
2731        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2732        loff_t i_size = i_size_read(inode);
2733        const pgoff_t end_index = ((unsigned long long)i_size)
2734                                                        >> PAGE_SHIFT;
2735        loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2736        unsigned offset = 0;
2737        bool need_balance_fs = false;
2738        int err = 0;
2739        struct f2fs_io_info fio = {
2740                .sbi = sbi,
2741                .ino = inode->i_ino,
2742                .type = DATA,
2743                .op = REQ_OP_WRITE,
2744                .op_flags = wbc_to_write_flags(wbc),
2745                .old_blkaddr = NULL_ADDR,
2746                .page = page,
2747                .encrypted_page = NULL,
2748                .submitted = false,
2749                .compr_blocks = compr_blocks,
2750                .need_lock = LOCK_RETRY,
2751                .io_type = io_type,
2752                .io_wbc = wbc,
2753                .bio = bio,
2754                .last_block = last_block,
2755        };
2756
2757        trace_f2fs_writepage(page, DATA);
2758
2759        /* we should bypass data pages to let the kworker jobs proceed */
2760        if (unlikely(f2fs_cp_error(sbi))) {
2761                mapping_set_error(page->mapping, -EIO);
2762                /*
2763                 * don't drop any dirty dentry pages to keep the latest
2764                 * directory structure.
2765                 */
2766                if (S_ISDIR(inode->i_mode))
2767                        goto redirty_out;
2768                goto out;
2769        }
2770
2771        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2772                goto redirty_out;
2773
2774        if (page->index < end_index ||
2775                        f2fs_verity_in_progress(inode) ||
2776                        compr_blocks)
2777                goto write;
2778
2779        /*
2780         * If the offset is out of range of the file size,
2781         * this page does not have to be written to disk.
2782         */
2783        offset = i_size & (PAGE_SIZE - 1);
2784        if ((page->index >= end_index + 1) || !offset)
2785                goto out;
2786
2787        zero_user_segment(page, offset, PAGE_SIZE);
2788write:
2789        if (f2fs_is_drop_cache(inode))
2790                goto out;
2791        /* we should not write the 0'th page, which holds the journal header */
2792        if (f2fs_is_volatile_file(inode) && (!page->index ||
2793                        (!wbc->for_reclaim &&
2794                        f2fs_available_free_memory(sbi, BASE_CHECK))))
2795                goto redirty_out;
2796
2797        /* Dentry/quota blocks are controlled by checkpoint */
2798        if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2799                /*
2800                 * We need to wait for node_write to avoid block allocation during
2801                 * checkpoint.  This can only happen for quota writes, which could
2802                 * otherwise cause the discard race condition below.
2803                 */
2804                if (IS_NOQUOTA(inode))
2805                        down_read(&sbi->node_write);
2806
2807                fio.need_lock = LOCK_DONE;
2808                err = f2fs_do_write_data_page(&fio);
2809
2810                if (IS_NOQUOTA(inode))
2811                        up_read(&sbi->node_write);
2812
2813                goto done;
2814        }
2815
2816        if (!wbc->for_reclaim)
2817                need_balance_fs = true;
2818        else if (has_not_enough_free_secs(sbi, 0, 0))
2819                goto redirty_out;
2820        else
2821                set_inode_flag(inode, FI_HOT_DATA);
2822
2823        err = -EAGAIN;
2824        if (f2fs_has_inline_data(inode)) {
2825                err = f2fs_write_inline_data(inode, page);
2826                if (!err)
2827                        goto out;
2828        }
2829
2830        if (err == -EAGAIN) {
2831                err = f2fs_do_write_data_page(&fio);
2832                if (err == -EAGAIN) {
2833                        fio.need_lock = LOCK_REQ;
2834                        err = f2fs_do_write_data_page(&fio);
2835                }
2836        }
2837
2838        if (err) {
2839                file_set_keep_isize(inode);
2840        } else {
2841                spin_lock(&F2FS_I(inode)->i_size_lock);
2842                if (F2FS_I(inode)->last_disk_size < psize)
2843                        F2FS_I(inode)->last_disk_size = psize;
2844                spin_unlock(&F2FS_I(inode)->i_size_lock);
2845        }
2846
2847done:
2848        if (err && err != -ENOENT)
2849                goto redirty_out;
2850
2851out:
2852        inode_dec_dirty_pages(inode);
2853        if (err) {
2854                ClearPageUptodate(page);
2855                clear_cold_data(page);
2856        }
2857
2858        if (wbc->for_reclaim) {
2859                f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2860                clear_inode_flag(inode, FI_HOT_DATA);
2861                f2fs_remove_dirty_inode(inode);
2862                submitted = NULL;
2863        }
2864        unlock_page(page);
2865        if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2866                                        !F2FS_I(inode)->cp_task)
2867                f2fs_balance_fs(sbi, need_balance_fs);
2868
2869        if (unlikely(f2fs_cp_error(sbi))) {
2870                f2fs_submit_merged_write(sbi, DATA);
2871                f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2872                submitted = NULL;
2873        }
2874
2875        if (submitted)
2876                *submitted = fio.submitted ? 1 : 0;
2877
2878        return 0;
2879
2880redirty_out:
2881        redirty_page_for_writepage(wbc, page);
2882        /*
2883         * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2884         * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2885         * file_write_and_wait_range() will then see the EIO error, which is
2886         * critical for fsync() to report the atomic_write failure to the user.
2887         */
2888        if (!err || wbc->for_reclaim)
2889                return AOP_WRITEPAGE_ACTIVATE;
2890        unlock_page(page);
2891        return err;
2892}
2893
2894static int f2fs_write_data_page(struct page *page,
2895                                        struct writeback_control *wbc)
2896{
2897#ifdef CONFIG_F2FS_FS_COMPRESSION
2898        struct inode *inode = page->mapping->host;
2899
2900        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2901                goto out;
2902
2903        if (f2fs_compressed_file(inode)) {
2904                if (f2fs_is_compressed_cluster(inode, page->index)) {
2905                        redirty_page_for_writepage(wbc, page);
2906                        return AOP_WRITEPAGE_ACTIVATE;
2907                }
2908        }
2909out:
2910#endif
2911
2912        return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2913                                                wbc, FS_DATA_IO, 0);
2914}
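
/*
 * Note that pages belonging to a compressed cluster are redirtied above
 * and written later by f2fs_write_multi_pages() from
 * f2fs_write_cache_pages(), so only non-compressed pages reach
 * f2fs_write_single_data_page() through this path.
 */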
2915
2916/*
2917 * This function was copied from write_cache_pages() in mm/page-writeback.c.
2918 * The major change is that the write step for cold data pages is separated
2919 * from warm/hot data pages.
2920 */
2921static int f2fs_write_cache_pages(struct address_space *mapping,
2922                                        struct writeback_control *wbc,
2923                                        enum iostat_type io_type)
2924{
2925        int ret = 0;
2926        int done = 0, retry = 0;
2927        struct pagevec pvec;
2928        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2929        struct bio *bio = NULL;
2930        sector_t last_block;
2931#ifdef CONFIG_F2FS_FS_COMPRESSION
2932        struct inode *inode = mapping->host;
2933        struct compress_ctx cc = {
2934                .inode = inode,
2935                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2936                .cluster_size = F2FS_I(inode)->i_cluster_size,
2937                .cluster_idx = NULL_CLUSTER,
2938                .rpages = NULL,
2939                .nr_rpages = 0,
2940                .cpages = NULL,
2941                .rbuf = NULL,
2942                .cbuf = NULL,
2943                .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2944                .private = NULL,
2945        };
2946#endif
2947        int nr_pages;
2948        pgoff_t index;
2949        pgoff_t end;            /* Inclusive */
2950        pgoff_t done_index;
2951        int range_whole = 0;
2952        xa_mark_t tag;
2953        int nwritten = 0;
2954        int submitted = 0;
2955        int i;
2956
2957        pagevec_init(&pvec);
2958
2959        if (get_dirty_pages(mapping->host) <=
2960                                SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2961                set_inode_flag(mapping->host, FI_HOT_DATA);
2962        else
2963                clear_inode_flag(mapping->host, FI_HOT_DATA);
2964
2965        if (wbc->range_cyclic) {
2966                index = mapping->writeback_index; /* prev offset */
2967                end = -1;
2968        } else {
2969                index = wbc->range_start >> PAGE_SHIFT;
2970                end = wbc->range_end >> PAGE_SHIFT;
2971                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2972                        range_whole = 1;
2973        }
2974        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2975                tag = PAGECACHE_TAG_TOWRITE;
2976        else
2977                tag = PAGECACHE_TAG_DIRTY;
2978retry:
2979        retry = 0;
2980        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2981                tag_pages_for_writeback(mapping, index, end);
2982        done_index = index;
2983        while (!done && !retry && (index <= end)) {
2984                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2985                                tag);
2986                if (nr_pages == 0)
2987                        break;
2988
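                    /*
                     * Walk the pages found by the tag lookup.  With
                     * compression enabled, a page that cannot be merged
                     * into the current cluster forces that cluster to be
                     * written out first; the page itself is then retried
                     * via the readd label below.
                     */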
2989                for (i = 0; i < nr_pages; i++) {
2990                        struct page *page = pvec.pages[i];
2991                        bool need_readd;
2992readd:
2993                        need_readd = false;
2994#ifdef CONFIG_F2FS_FS_COMPRESSION
2995                        if (f2fs_compressed_file(inode)) {
2996                                ret = f2fs_init_compress_ctx(&cc);
2997                                if (ret) {
2998                                        done = 1;
2999                                        break;
3000                                }
3001
3002                                if (!f2fs_cluster_can_merge_page(&cc,
3003                                                                page->index)) {
3004                                        ret = f2fs_write_multi_pages(&cc,
3005                                                &submitted, wbc, io_type);
3006                                        if (!ret)
3007                                                need_readd = true;
3008                                        goto result;
3009                                }
3010
3011                                if (unlikely(f2fs_cp_error(sbi)))
3012                                        goto lock_page;
3013
3014                                if (f2fs_cluster_is_empty(&cc)) {
3015                                        void *fsdata = NULL;
3016                                        struct page *pagep;
3017                                        int ret2;
3018
3019                                        ret2 = f2fs_prepare_compress_overwrite(
3020                                                        inode, &pagep,
3021                                                        page->index, &fsdata);
3022                                        if (ret2 < 0) {
3023                                                ret = ret2;
3024                                                done = 1;
3025                                                break;
3026                                        } else if (ret2 &&
3027                                                !f2fs_compress_write_end(inode,
3028                                                                fsdata, page->index,
3029                                                                1)) {
3030                                                retry = 1;
3031                                                break;
3032                                        }
3033                                } else {
3034                                        goto lock_page;
3035                                }
3036                        }
3037#endif
3038                        /* give priority to WB_SYNC threads */
3039                        if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3040                                        wbc->sync_mode == WB_SYNC_NONE) {
3041                                done = 1;
3042                                break;
3043                        }
3044#ifdef CONFIG_F2FS_FS_COMPRESSION
3045lock_page:
3046#endif
3047                        done_index = page->index;
3048retry_write:
3049                        lock_page(page);
3050
3051                        if (unlikely(page->mapping != mapping)) {
3052continue_unlock:
3053                                unlock_page(page);
3054                                continue;
3055                        }
3056
3057                        if (!PageDirty(page)) {
3058                                /* someone wrote it for us */
3059                                goto continue_unlock;
3060                        }
3061
3062                        if (PageWriteback(page)) {
3063                                if (wbc->sync_mode != WB_SYNC_NONE)
3064                                        f2fs_wait_on_page_writeback(page,
3065                                                        DATA, true, true);
3066                                else
3067                                        goto continue_unlock;
3068                        }
3069
3070                        if (!clear_page_dirty_for_io(page))
3071                                goto continue_unlock;
3072
3073#ifdef CONFIG_F2FS_FS_COMPRESSION
3074                        if (f2fs_compressed_file(inode)) {
3075                                get_page(page);
3076                                f2fs_compress_ctx_add_page(&cc, page);
3077                                continue;
3078                        }
3079#endif
3080                        ret = f2fs_write_single_data_page(page, &submitted,
3081                                        &bio, &last_block, wbc, io_type, 0);
3082                        if (ret == AOP_WRITEPAGE_ACTIVATE)
3083                                unlock_page(page);
3084#ifdef CONFIG_F2FS_FS_COMPRESSION
3085result:
3086#endif
3087                        nwritten += submitted;
3088                        wbc->nr_to_write -= submitted;
3089
3090                        if (unlikely(ret)) {
3091                                /*
3092                                 * keep nr_to_write, since vfs uses this to
3093                                 * get # of written pages.
3094                                 */
3095                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
3096                                        ret = 0;
3097                                        goto next;
3098                                } else if (ret == -EAGAIN) {
3099                                        ret = 0;
3100                                        if (wbc->sync_mode == WB_SYNC_ALL) {
3101                                                cond_resched();
3102                                                congestion_wait(BLK_RW_ASYNC,
3103                                                        DEFAULT_IO_TIMEOUT);
3104                                                goto retry_write;
3105                                        }
3106                                        goto next;
3107                                }
3108                                done_index = page->index + 1;
3109                                done = 1;
3110                                break;
3111                        }
3112
3113                        if (wbc->nr_to_write <= 0 &&
3114                                        wbc->sync_mode == WB_SYNC_NONE) {
3115                                done = 1;
3116                                break;
3117                        }
3118next:
3119                        if (need_readd)
3120                                goto readd;
3121                }
3122                pagevec_release(&pvec);
3123                cond_resched();
3124        }
3125#ifdef CONFIG_F2FS_FS_COMPRESSION
3126        /* flush the remaining pages in the compress cluster */
3127        if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3128                ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3129                nwritten += submitted;
3130                wbc->nr_to_write -= submitted;
3131                if (ret) {
3132                        done = 1;
3133                        retry = 0;
3134                }
3135        }
3136#endif
3137        if (retry) {
3138                index = 0;
3139                end = -1;
3140                goto retry;
3141        }
3142        if (wbc->range_cyclic && !done)
3143                done_index = 0;
3144        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3145                mapping->writeback_index = done_index;
3146
3147        if (nwritten)
3148                f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3149                                                                NULL, 0, DATA);
3150        /* submit the cached bio of IPU (in-place update) writes */
3151        if (bio)
3152                f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3153
3154        return ret;
3155}
3156
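    /*
     * Serializing ->writepages under sbi->writepages keeps one inode's
     * blocks together on disk.  It is applied to compressed files, to
     * non-WB_SYNC_ALL writeback, and to inodes with at least
     * min_seq_blocks dirty pages.
     */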
3157static inline bool __should_serialize_io(struct inode *inode,
3158                                        struct writeback_control *wbc)
3159{
3160        /* to avoid deadlock in path of data flush */
3161        if (F2FS_I(inode)->cp_task)
3162                return false;
3163
3164        if (!S_ISREG(inode->i_mode))
3165                return false;
3166        if (IS_NOQUOTA(inode))
3167                return false;
3168
3169        if (f2fs_compressed_file(inode))
3170                return true;
3171        if (wbc->sync_mode != WB_SYNC_ALL)
3172                return true;
3173        if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3174                return true;
3175        return false;
3176}
3177
3178static int __f2fs_write_data_pages(struct address_space *mapping,
3179                                                struct writeback_control *wbc,
3180                                                enum iostat_type io_type)
3181{
3182        struct inode *inode = mapping->host;
3183        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3184        struct blk_plug plug;
3185        int ret;
3186        bool locked = false;
3187
3188        /* deal with chardevs and other special files */
3189        if (!mapping->a_ops->writepage)
3190                return 0;
3191
3192        /* skip writing if there is no dirty page in this inode */
3193        if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3194                return 0;
3195
3196        /* during POR (power-on recovery), don't trigger writepage at all. */
3197        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3198                goto skip_write;
3199
3200        if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3201                        wbc->sync_mode == WB_SYNC_NONE &&
3202                        get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3203                        f2fs_available_free_memory(sbi, DIRTY_DENTS))
3204                goto skip_write;
3205
3206        /* skip writing during file defragment */
3207        if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3208                goto skip_write;
3209
3210        trace_f2fs_writepages(mapping->host, wbc, DATA);
3211
3212        /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3213        if (wbc->sync_mode == WB_SYNC_ALL)
3214                atomic_inc(&sbi->wb_sync_req[DATA]);
3215        else if (atomic_read(&sbi->wb_sync_req[DATA]))
3216                goto skip_write;
3217
3218        if (__should_serialize_io(inode, wbc)) {
3219                mutex_lock(&sbi->writepages);
3220                locked = true;
3221        }
3222
3223        blk_start_plug(&plug);
3224        ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3225        blk_finish_plug(&plug);
3226
3227        if (locked)
3228                mutex_unlock(&sbi->writepages);
3229
3230        if (wbc->sync_mode == WB_SYNC_ALL)
3231                atomic_dec(&sbi->wb_sync_req[DATA]);
3232        /*
3233         * if some pages were truncated, we cannot rely on mapping->host
3234         * to detect pending bios.
3235         */
3236
3237        f2fs_remove_dirty_inode(inode);
3238        return ret;
3239
3240skip_write:
3241        wbc->pages_skipped += get_dirty_pages(inode);
3242        trace_f2fs_writepages(mapping->host, wbc, DATA);
3243        return 0;
3244}
3245
3246static int f2fs_write_data_pages(struct address_space *mapping,
3247                            struct writeback_control *wbc)
3248{
3249        struct inode *inode = mapping->host;
3250
3251        return __f2fs_write_data_pages(mapping, wbc,
3252                        F2FS_I(inode)->cp_task == current ?
3253                        FS_CP_DATA_IO : FS_DATA_IO);
3254}
3255
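    /*
     * A failed write reached @to: drop the page cache and any blocks that
     * were (pre)allocated beyond the old i_size.
     */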
3256static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3257{
3258        struct inode *inode = mapping->host;
3259        loff_t i_size = i_size_read(inode);
3260
3261        if (IS_NOQUOTA(inode))
3262                return;
3263
3264        /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3265        if (to > i_size && !f2fs_verity_in_progress(inode)) {
3266                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3267                down_write(&F2FS_I(inode)->i_mmap_sem);
3268
3269                truncate_pagecache(inode, i_size);
3270                f2fs_truncate_blocks(inode, i_size, true);
3271
3272                up_write(&F2FS_I(inode)->i_mmap_sem);
3273                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3274        }
3275}
3276
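    /*
     * Look up (or allocate) the block address backing @page for an
     * upcoming write.  This handles inline-data conversion and reports,
     * via @node_changed, whether a node page was dirtied so that the
     * caller can rebalance free sections.
     */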
3277static int prepare_write_begin(struct f2fs_sb_info *sbi,
3278                        struct page *page, loff_t pos, unsigned len,
3279                        block_t *blk_addr, bool *node_changed)
3280{
3281        struct inode *inode = page->mapping->host;
3282        pgoff_t index = page->index;
3283        struct dnode_of_data dn;
3284        struct page *ipage;
3285        bool locked = false;
3286        struct extent_info ei = {0, 0, 0};
3287        int err = 0;
3288        int flag;
3289
3290        /*
3291         * we have already allocated all the blocks, so we don't need to
3292         * look up the block address when the page doesn't need to be filled.
3293         */
3294        if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3295            !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3296            !f2fs_verity_in_progress(inode))
3297                return 0;
3298
3299        /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3300        if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3301                flag = F2FS_GET_BLOCK_DEFAULT;
3302        else
3303                flag = F2FS_GET_BLOCK_PRE_AIO;
3304
3305        if (f2fs_has_inline_data(inode) ||
3306                        (pos & PAGE_MASK) >= i_size_read(inode)) {
3307                f2fs_do_map_lock(sbi, flag, true);
3308                locked = true;
3309        }
3310
3311restart:
3312        /* check inline_data */
3313        ipage = f2fs_get_node_page(sbi, inode->i_ino);
3314        if (IS_ERR(ipage)) {
3315                err = PTR_ERR(ipage);
3316                goto unlock_out;
3317        }
3318
3319        set_new_dnode(&dn, inode, ipage, ipage, 0);
3320
3321        if (f2fs_has_inline_data(inode)) {
3322                if (pos + len <= MAX_INLINE_DATA(inode)) {
3323                        f2fs_do_read_inline_data(page, ipage);
3324                        set_inode_flag(inode, FI_DATA_EXIST);
3325                        if (inode->i_nlink)
3326                                set_inline_node(ipage);
3327                } else {
3328                        err = f2fs_convert_inline_page(&dn, page);
3329                        if (err)
3330                                goto out;
3331                        if (dn.data_blkaddr == NULL_ADDR)
3332                                err = f2fs_get_block(&dn, index);
3333                }
3334        } else if (locked) {
3335                err = f2fs_get_block(&dn, index);
3336        } else {
3337                if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3338                        dn.data_blkaddr = ei.blk + index - ei.fofs;
3339                } else {
3340                        /* hole case */
3341                        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3342                        if (err || dn.data_blkaddr == NULL_ADDR) {
3343                                f2fs_put_dnode(&dn);
3344                                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3345                                                                true);
3346                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3347                                locked = true;
3348                                goto restart;
3349                        }
3350                }
3351        }
3352
3353        /* f2fs_convert_inline_page() can set dn.node_changed */
3354        *blk_addr = dn.data_blkaddr;
3355        *node_changed = dn.node_changed;
3356out:
3357        f2fs_put_dnode(&dn);
3358unlock_out:
3359        if (locked)
3360                f2fs_do_map_lock(sbi, flag, false);
3361        return err;
3362}
3363
3364static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3365                loff_t pos, unsigned len, unsigned flags,
3366                struct page **pagep, void **fsdata)
3367{
3368        struct inode *inode = mapping->host;
3369        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3370        struct page *page = NULL;
3371        pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3372        bool need_balance = false, drop_atomic = false;
3373        block_t blkaddr = NULL_ADDR;
3374        int err = 0;
3375
3376        trace_f2fs_write_begin(inode, pos, len, flags);
3377
3378        if (!f2fs_is_checkpoint_ready(sbi)) {
3379                err = -ENOSPC;
3380                goto fail;
3381        }
3382
3383        if ((f2fs_is_atomic_file(inode) &&
3384                        !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3385                        is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3386                err = -ENOMEM;
3387                drop_atomic = true;
3388                goto fail;
3389        }
3390
3391        /*
3392         * We should check this here to avoid a deadlock between the inode
3393         * page and page #0. The locking rule for inline_data conversion is:
3394         * lock_page(page #0) -> lock_page(inode_page)
3395         */
3396        if (index != 0) {
3397                err = f2fs_convert_inline_inode(inode);
3398                if (err)
3399                        goto fail;
3400        }
3401
3402#ifdef CONFIG_F2FS_FS_COMPRESSION
3403        if (f2fs_compressed_file(inode)) {
3404                int ret;
3405
3406                *fsdata = NULL;
3407
3408                ret = f2fs_prepare_compress_overwrite(inode, pagep,
3409                                                        index, fsdata);
3410                if (ret < 0) {
3411                        err = ret;
3412                        goto fail;
3413                } else if (ret) {
3414                        return 0;
3415                }
3416        }
3417#endif
3418
3419repeat:
3420        /*
3421         * Do not use grab_cache_page_write_begin(), to avoid a deadlock due to
3422         * wait_for_stable_page(); we wait for it below with our IO control.
3423         */
3424        page = f2fs_pagecache_get_page(mapping, index,
3425                                FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3426        if (!page) {
3427                err = -ENOMEM;
3428                goto fail;
3429        }
3430
3431        /* TODO: cluster can be compressed due to race with .writepage */
3432
3433        *pagep = page;
3434
3435        err = prepare_write_begin(sbi, page, pos, len,
3436                                        &blkaddr, &need_balance);
3437        if (err)
3438                goto fail;
3439
3440        if (need_balance && !IS_NOQUOTA(inode) &&
3441                        has_not_enough_free_secs(sbi, 0, 0)) {
3442                unlock_page(page);
3443                f2fs_balance_fs(sbi, true);
3444                lock_page(page);
3445                if (page->mapping != mapping) {
3446                        /* The page got truncated from under us */
3447                        f2fs_put_page(page, 1);
3448                        goto repeat;
3449                }
3450        }
3451
3452        f2fs_wait_on_page_writeback(page, DATA, false, true);
3453
3454        if (len == PAGE_SIZE || PageUptodate(page))
3455                return 0;
3456
3457        if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3458            !f2fs_verity_in_progress(inode)) {
3459                zero_user_segment(page, len, PAGE_SIZE);
3460                return 0;
3461        }
3462
3463        if (blkaddr == NEW_ADDR) {
3464                zero_user_segment(page, 0, PAGE_SIZE);
3465                SetPageUptodate(page);
3466        } else {
3467                if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3468                                DATA_GENERIC_ENHANCE_READ)) {
3469                        err = -EFSCORRUPTED;
3470                        goto fail;
3471                }
3472                err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3473                if (err)
3474                        goto fail;
3475
3476                lock_page(page);
3477                if (unlikely(page->mapping != mapping)) {
3478                        f2fs_put_page(page, 1);
3479                        goto repeat;
3480                }
3481                if (unlikely(!PageUptodate(page))) {
3482                        err = -EIO;
3483                        goto fail;
3484                }
3485        }
3486        return 0;
3487
3488fail:
3489        f2fs_put_page(page, 1);
3490        f2fs_write_failed(mapping, pos + len);
3491        if (drop_atomic)
3492                f2fs_drop_inmem_pages_all(sbi, false);
3493        return err;
3494}
3495
3496static int f2fs_write_end(struct file *file,
3497                        struct address_space *mapping,
3498                        loff_t pos, unsigned len, unsigned copied,
3499                        struct page *page, void *fsdata)
3500{
3501        struct inode *inode = page->mapping->host;
3502
3503        trace_f2fs_write_end(inode, pos, len, copied);
3504
3505        /*
3506         * A !PageUptodate() page should come from len == PAGE_SIZE, so we
3507         * expect copied to be PAGE_SIZE as well. Otherwise, treat it as zero
3508         * copied and let generic_perform_write() retry the copy via copied=0.
3509         */
3510        if (!PageUptodate(page)) {
3511                if (unlikely(copied != len))
3512                        copied = 0;
3513                else
3514                        SetPageUptodate(page);
3515        }
3516
3517#ifdef CONFIG_F2FS_FS_COMPRESSION
3518        /* overwrite compressed file */
3519        if (f2fs_compressed_file(inode) && fsdata) {
3520                f2fs_compress_write_end(inode, fsdata, page->index, copied);
3521                f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3522
3523                if (pos + copied > i_size_read(inode) &&
3524                                !f2fs_verity_in_progress(inode))
3525                        f2fs_i_size_write(inode, pos + copied);
3526                return copied;
3527        }
3528#endif
3529
3530        if (!copied)
3531                goto unlock_out;
3532
3533        set_page_dirty(page);
3534
3535        if (pos + copied > i_size_read(inode) &&
3536            !f2fs_verity_in_progress(inode))
3537                f2fs_i_size_write(inode, pos + copied);
3538unlock_out:
3539        f2fs_put_page(page, 1);
3540        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3541        return copied;
3542}
3543
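    /*
     * Check alignment for direct IO: 0 means DIO is allowed, a positive
     * return falls back to buffered IO, and -EINVAL means the request is
     * not aligned even to the device's logical block size.
     */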
3544static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3545                           loff_t offset)
3546{
3547        unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3548        unsigned blkbits = i_blkbits;
3549        unsigned blocksize_mask = (1 << blkbits) - 1;
3550        unsigned long align = offset | iov_iter_alignment(iter);
3551        struct block_device *bdev = inode->i_sb->s_bdev;
3552
3553        if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3554                return 1;
3555
3556        if (align & blocksize_mask) {
3557                if (bdev)
3558                        blkbits = blksize_bits(bdev_logical_block_size(bdev));
3559                blocksize_mask = (1 << blkbits) - 1;
3560                if (align & blocksize_mask)
3561                        return -EINVAL;
3562                return 1;
3563        }
3564        return 0;
3565}
3566
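    /*
     * For direct IO, the bio completion is wrapped: f2fs_dio_submit_bio()
     * bumps the in-flight DIO page count and installs f2fs_dio_end_io(),
     * which restores the original bi_end_io/bi_private before completing.
     */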
3567static void f2fs_dio_end_io(struct bio *bio)
3568{
3569        struct f2fs_private_dio *dio = bio->bi_private;
3570
3571        dec_page_count(F2FS_I_SB(dio->inode),
3572                        dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3573
3574        bio->bi_private = dio->orig_private;
3575        bio->bi_end_io = dio->orig_end_io;
3576
3577        kvfree(dio);
3578
3579        bio_endio(bio);
3580}
3581
3582static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3583                                                        loff_t file_offset)
3584{
3585        struct f2fs_private_dio *dio;
3586        bool write = (bio_op(bio) == REQ_OP_WRITE);
3587
3588        dio = f2fs_kzalloc(F2FS_I_SB(inode),
3589                        sizeof(struct f2fs_private_dio), GFP_NOFS);
3590        if (!dio)
3591                goto out;
3592
3593        dio->inode = inode;
3594        dio->orig_end_io = bio->bi_end_io;
3595        dio->orig_private = bio->bi_private;
3596        dio->write = write;
3597
3598        bio->bi_end_io = f2fs_dio_end_io;
3599        bio->bi_private = dio;
3600
3601        inc_page_count(F2FS_I_SB(inode),
3602                        write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3603
3604        submit_bio(bio);
3605        return;
3606out:
3607        bio->bi_status = BLK_STS_IOERR;
3608        bio_endio(bio);
3609}
3610
3611static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3612{
3613        struct address_space *mapping = iocb->ki_filp->f_mapping;
3614        struct inode *inode = mapping->host;
3615        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3616        struct f2fs_inode_info *fi = F2FS_I(inode);
3617        size_t count = iov_iter_count(iter);
3618        loff_t offset = iocb->ki_pos;
3619        int rw = iov_iter_rw(iter);
3620        int err;
3621        enum rw_hint hint = iocb->ki_hint;
3622        int whint_mode = F2FS_OPTION(sbi).whint_mode;
3623        bool do_opu;
3624
3625        err = check_direct_IO(inode, iter, offset);
3626        if (err)
3627                return err < 0 ? err : 0;
3628
3629        if (f2fs_force_buffered_io(inode, iocb, iter))
3630                return 0;
3631
3632        do_opu = allow_outplace_dio(inode, iocb, iter);
3633
3634        trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3635
3636        if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3637                iocb->ki_hint = WRITE_LIFE_NOT_SET;
3638
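            /*
             * Exclude GC while the DIO is in flight.  With IOCB_NOWAIT, only
             * trylock and return -EAGAIN instead of blocking; an out-of-place
             * update (do_opu) takes the READ side as well.
             */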
3639        if (iocb->ki_flags & IOCB_NOWAIT) {
3640                if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3641                        iocb->ki_hint = hint;
3642                        err = -EAGAIN;
3643                        goto out;
3644                }
3645                if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3646                        up_read(&fi->i_gc_rwsem[rw]);
3647                        iocb->ki_hint = hint;
3648                        err = -EAGAIN;
3649                        goto out;
3650                }
3651        } else {
3652                down_read(&fi->i_gc_rwsem[rw]);
3653                if (do_opu)
3654                        down_read(&fi->i_gc_rwsem[READ]);
3655        }
3656
3657        err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3658                        iter, rw == WRITE ? get_data_block_dio_write :
3659                        get_data_block_dio, NULL, f2fs_dio_submit_bio,
3660                        rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3661                        DIO_SKIP_HOLES);
3662
3663        if (do_opu)
3664                up_read(&fi->i_gc_rwsem[READ]);
3665
3666        up_read(&fi->i_gc_rwsem[rw]);
3667
3668        if (rw == WRITE) {
3669                if (whint_mode == WHINT_MODE_OFF)
3670                        iocb->ki_hint = hint;
3671                if (err > 0) {
3672                        f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3673                                                                        err);
3674                        if (!do_opu)
3675                                set_inode_flag(inode, FI_UPDATE_WRITE);
3676                } else if (err < 0) {
3677                        f2fs_write_failed(mapping, offset + count);
3678                }
3679        } else {
3680                if (err > 0)
3681                        f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3682        }
3683
3684out:
3685        trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3686
3687        return err;
3688}
3689
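    /*
     * ->invalidatepage: fix up the dirty-page accounting of meta, node
     * and data pages, and release the page's private state.  An
     * atomic-written page is dropped from the inmem list instead.
     */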
3690void f2fs_invalidate_page(struct page *page, unsigned int offset,
3691                                                        unsigned int length)
3692{
3693        struct inode *inode = page->mapping->host;
3694        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3695
3696        if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3697                (offset % PAGE_SIZE || length != PAGE_SIZE))
3698                return;
3699
3700        if (PageDirty(page)) {
3701                if (inode->i_ino == F2FS_META_INO(sbi)) {
3702                        dec_page_count(sbi, F2FS_DIRTY_META);
3703                } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3704                        dec_page_count(sbi, F2FS_DIRTY_NODES);
3705                } else {
3706                        inode_dec_dirty_pages(inode);
3707                        f2fs_remove_dirty_inode(inode);
3708                }
3709        }
3710
3711        clear_cold_data(page);
3712
3713        if (IS_ATOMIC_WRITTEN_PAGE(page))
3714                return f2fs_drop_inmem_page(inode, page);
3715
3716        f2fs_clear_page_private(page);
3717}
3718
3719int f2fs_release_page(struct page *page, gfp_t wait)
3720{
3721        /* If this is a dirty page, keep PagePrivate */
3722        if (PageDirty(page))
3723                return 0;
3724
3725        /* This is an atomic-written page, keep its private data */
3726        if (IS_ATOMIC_WRITTEN_PAGE(page))
3727                return 0;
3728
3729        clear_cold_data(page);
3730        f2fs_clear_page_private(page);
3731        return 1;
3732}
3733
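    /*
     * ->set_page_dirty: pages of an in-progress atomic file are not
     * tagged dirty but registered on the inmem list, so they are only
     * written back once the atomic write is committed.
     */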
3734static int f2fs_set_data_page_dirty(struct page *page)
3735{
3736        struct inode *inode = page_file_mapping(page)->host;
3737
3738        trace_f2fs_set_page_dirty(page, DATA);
3739
3740        if (!PageUptodate(page))
3741                SetPageUptodate(page);
3742        if (PageSwapCache(page))
3743                return __set_page_dirty_nobuffers(page);
3744
3745        if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3746                if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
3747                        f2fs_register_inmem_page(inode, page);
3748                        return 1;
3749                }
3750                /*
3751                 * This page has already been registered, so just
3752                 * return here.
3753                 */
3754                return 0;
3755        }
3756
3757        if (!PageDirty(page)) {
3758                __set_page_dirty_nobuffers(page);
3759                f2fs_update_dirty_page(inode, page);
3760                return 1;
3761        }
3762        return 0;
3763}
3765
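    /*
     * Map a logical block inside a compressed inode's cluster to its
     * on-disk block number; returns 0 if the cluster is compressed or
     * the block address is invalid.
     */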
3766static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3767{
3768#ifdef CONFIG_F2FS_FS_COMPRESSION
3769        struct dnode_of_data dn;
3770        sector_t start_idx, blknr = 0;
3771        int ret;
3772
3773        start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3774
3775        set_new_dnode(&dn, inode, NULL, NULL, 0);
3776        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3777        if (ret)
3778                return 0;
3779
3780        if (dn.data_blkaddr != COMPRESS_ADDR) {
3781                dn.ofs_in_node += block - start_idx;
3782                blknr = f2fs_data_blkaddr(&dn);
3783                if (!__is_valid_data_blkaddr(blknr))
3784                        blknr = 0;
3785        }
3786
3787        f2fs_put_dnode(&dn);
3788        return blknr;
3789#else
3790        return 0;
3791#endif
3792}
3794
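    /*
     * ->bmap: flush dirty pages first so block addresses are stable;
     * inline-data inodes have no block address and report 0.
     */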
3795static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3796{
3797        struct inode *inode = mapping->host;
3798        struct buffer_head tmp = {
3799                .b_size = i_blocksize(inode),
3800        };
3801        sector_t blknr = 0;
3802
3803        if (f2fs_has_inline_data(inode))
3804                goto out;
3805
3806        /* write dirty pages back first so that all blocks are allocated */
3807        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3808                filemap_write_and_wait(mapping);
3809
3810        if (f2fs_compressed_file(inode))
3811                blknr = f2fs_bmap_compress(inode, block);
3812
3813        if (!get_data_block_bmap(inode, block, &tmp, 0))
3814                blknr = tmp.b_blocknr;
3815out:
3816        trace_f2fs_bmap(inode, block, blknr);
3817        return blknr;
3818}
3819
3820#ifdef CONFIG_MIGRATION
3821#include <linux/migrate.h>
3822
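    /*
     * Page migration: an atomic-written page carries an extra reference
     * and an inmem_pages list entry, both of which must be transferred
     * to the new page under fi->inmem_lock.
     */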
3823int f2fs_migrate_page(struct address_space *mapping,
3824                struct page *newpage, struct page *page, enum migrate_mode mode)
3825{
3826        int rc, extra_count;
3827        struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3828        bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
3829
3830        BUG_ON(PageWriteback(page));
3831
3832        /* migrating an atomic-written page is safe with the inmem_lock held */
3833        if (atomic_written) {
3834                if (mode != MIGRATE_SYNC)
3835                        return -EBUSY;
3836                if (!mutex_trylock(&fi->inmem_lock))
3837                        return -EAGAIN;
3838        }
3839
3840        /* one extra reference was held for the atomic_write page */
3841        extra_count = atomic_written ? 1 : 0;
3842        rc = migrate_page_move_mapping(mapping, newpage,
3843                                page, extra_count);
3844        if (rc != MIGRATEPAGE_SUCCESS) {
3845                if (atomic_written)
3846                        mutex_unlock(&fi->inmem_lock);
3847                return rc;
3848        }
3849
3850        if (atomic_written) {
3851                struct inmem_pages *cur;
3852                list_for_each_entry(cur, &fi->inmem_pages, list)
3853                        if (cur->page == page) {
3854                                cur->page = newpage;
3855                                break;
3856                        }
3857                mutex_unlock(&fi->inmem_lock);
3858                put_page(page);
3859                get_page(newpage);
3860        }
3861
3862        if (PagePrivate(page)) {
3863                f2fs_set_page_private(newpage, page_private(page));
3864                f2fs_clear_page_private(page);
3865        }
3866
3867        if (mode != MIGRATE_SYNC_NO_COPY)
3868                migrate_page_copy(newpage, page);
3869        else
3870                migrate_page_states(newpage, page);
3871
3872        return MIGRATEPAGE_SUCCESS;
3873}
3874#endif
3875
3876#ifdef CONFIG_SWAP
3877/* Copied from generic_swapfile_activate() to check for any holes */
3878static int check_swap_activate(struct swap_info_struct *sis,
3879                                struct file *swap_file, sector_t *span)
3880{
3881        struct address_space *mapping = swap_file->f_mapping;
3882        struct inode *inode = mapping->host;
3883        unsigned blocks_per_page;
3884        unsigned long page_no;
3885        unsigned blkbits;
3886        sector_t probe_block;
3887        sector_t last_block;
3888        sector_t lowest_block = -1;
3889        sector_t highest_block = 0;
3890        int nr_extents = 0;
3891        int ret;
3892
3893        blkbits = inode->i_blkbits;
3894        blocks_per_page = PAGE_SIZE >> blkbits;
3895
3896        /*
3897         * Map all the blocks into the extent list.  This code doesn't try
3898         * to be very smart.
3899         */
3900        probe_block = 0;
3901        page_no = 0;
3902        last_block = i_size_read(inode) >> blkbits;
3903        while ((probe_block + blocks_per_page) <= last_block &&
3904                        page_no < sis->max) {
3905                unsigned block_in_page;
3906                sector_t first_block;
3907                sector_t block = 0;
3908                int      err = 0;
3909
3910                cond_resched();
3911
3912                block = probe_block;
3913                err = bmap(inode, &block);
3914                if (err || !block)
3915                        goto bad_bmap;
3916                first_block = block;
3917
3918                /*
3919                 * It must be PAGE_SIZE-aligned on disk
3920                 */
3921                if (first_block & (blocks_per_page - 1)) {
3922                        probe_block++;
3923                        goto reprobe;
3924                }
3925
3926                for (block_in_page = 1; block_in_page < blocks_per_page;
3927                                        block_in_page++) {
3928
3929                        block = probe_block + block_in_page;
3930                        err = bmap(inode, &block);
3931
3932                        if (err || !block)
3933                                goto bad_bmap;
3934
3935                        if (block != first_block + block_in_page) {
3936                                /* Discontiguity */
3937                                probe_block++;
3938                                goto reprobe;
3939                        }
3940                }
3941
3942                first_block >>= (PAGE_SHIFT - blkbits);
3943                if (page_no) {  /* exclude the header page */
3944                        if (first_block < lowest_block)
3945                                lowest_block = first_block;
3946                        if (first_block > highest_block)
3947                                highest_block = first_block;
3948                }
3949
3950                /*
3951                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
3952                 */
3953                ret = add_swap_extent(sis, page_no, 1, first_block);
3954                if (ret < 0)
3955                        goto out;
3956                nr_extents += ret;
3957                page_no++;
3958                probe_block += blocks_per_page;
3959reprobe:
3960                continue;
3961        }
3962        ret = nr_extents;
3963        *span = 1 + highest_block - lowest_block;
3964        if (page_no == 0)
3965                page_no = 1;    /* force Empty message */
3966        sis->max = page_no;
3967        sis->pages = page_no - 1;
3968        sis->highest_bit = page_no - 1;
3969out:
3970        return ret;
3971bad_bmap:
3972        pr_err("swapon: swapfile has holes\n");
3973        return -EINVAL;
3974}
3975
3976static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3977                                sector_t *span)
3978{
3979        struct inode *inode = file_inode(file);
3980        int ret;
3981
3982        if (!S_ISREG(inode->i_mode))
3983                return -EINVAL;
3984
3985        if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3986                return -EROFS;
3987
3988        ret = f2fs_convert_inline_inode(inode);
3989        if (ret)
3990                return ret;
3991
3992        if (f2fs_disable_compressed_file(inode))
3993                return -EINVAL;
3994
3995        ret = check_swap_activate(sis, file, span);
3996        if (ret < 0)
3997                return ret;
3998
3999        set_inode_flag(inode, FI_PIN_FILE);
4000        f2fs_precache_extents(inode);
4001        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4002        return ret;
4003}
4004
4005static void f2fs_swap_deactivate(struct file *file)
4006{
4007        struct inode *inode = file_inode(file);
4008
4009        clear_inode_flag(inode, FI_PIN_FILE);
4010}
4011#else
4012static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4013                                sector_t *span)
4014{
4015        return -EOPNOTSUPP;
4016}
4017
4018static void f2fs_swap_deactivate(struct file *file)
4019{
4020}
4021#endif
4022
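    /* address_space operations for regular f2fs data pages */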
4023const struct address_space_operations f2fs_dblock_aops = {
4024        .readpage       = f2fs_read_data_page,
4025        .readahead      = f2fs_readahead,
4026        .writepage      = f2fs_write_data_page,
4027        .writepages     = f2fs_write_data_pages,
4028        .write_begin    = f2fs_write_begin,
4029        .write_end      = f2fs_write_end,
4030        .set_page_dirty = f2fs_set_data_page_dirty,
4031        .invalidatepage = f2fs_invalidate_page,
4032        .releasepage    = f2fs_release_page,
4033        .direct_IO      = f2fs_direct_IO,
4034        .bmap           = f2fs_bmap,
4035        .swap_activate  = f2fs_swap_activate,
4036        .swap_deactivate = f2fs_swap_deactivate,
4037#ifdef CONFIG_MIGRATION
4038        .migratepage    = f2fs_migrate_page,
4039#endif
4040};
4041
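    /*
     * Clear the dirty tag in the page-cache xarray without touching the
     * page's own dirty bit; callers handle that part themselves.
     */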
4042void f2fs_clear_page_cache_dirty_tag(struct page *page)
4043{
4044        struct address_space *mapping = page_mapping(page);
4045        unsigned long flags;
4046
4047        xa_lock_irqsave(&mapping->i_pages, flags);
4048        __xa_clear_mark(&mapping->i_pages, page_index(page),
4049                                                PAGECACHE_TAG_DIRTY);
4050        xa_unlock_irqrestore(&mapping->i_pages, flags);
4051}
4052
4053int __init f2fs_init_post_read_processing(void)
4054{
4055        bio_post_read_ctx_cache =
4056                kmem_cache_create("f2fs_bio_post_read_ctx",
4057                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4058        if (!bio_post_read_ctx_cache)
4059                goto fail;
4060        bio_post_read_ctx_pool =
4061                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4062                                         bio_post_read_ctx_cache);
4063        if (!bio_post_read_ctx_pool)
4064                goto fail_free_cache;
4065        return 0;
4066
4067fail_free_cache:
4068        kmem_cache_destroy(bio_post_read_ctx_cache);
4069fail:
4070        return -ENOMEM;
4071}
4072
4073void f2fs_destroy_post_read_processing(void)
4074{
4075        mempool_destroy(bio_post_read_ctx_pool);
4076        kmem_cache_destroy(bio_post_read_ctx_cache);
4077}
4078
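    /*
     * The post-read workqueue runs decryption, verity verification and
     * decompression work, so it is only needed when one of those
     * features is enabled on the filesystem.
     */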
4079int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4080{
4081        if (!f2fs_sb_has_encrypt(sbi) &&
4082                !f2fs_sb_has_verity(sbi) &&
4083                !f2fs_sb_has_compression(sbi))
4084                return 0;
4085
4086        sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4087                                                 WQ_UNBOUND | WQ_HIGHPRI,
4088                                                 num_online_cpus());
4089        if (!sbi->post_read_wq)
4090                return -ENOMEM;
4091        return 0;
4092}
4093
4094void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4095{
4096        if (sbi->post_read_wq)
4097                destroy_workqueue(sbi->post_read_wq);
4098}
4099
4100int __init f2fs_init_bio_entry_cache(void)
4101{
4102        bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4103                        sizeof(struct bio_entry));
4104        if (!bio_entry_slab)
4105                return -ENOMEM;
4106        return 0;
4107}
4108
4109void f2fs_destroy_bio_entry_cache(void)
4110{
4111        kmem_cache_destroy(bio_entry_slab);
4112}
4113