// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE      NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
        if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
                                        0, BIOSET_NEED_BVECS))
                return -ENOMEM;
        return 0;
}

void f2fs_destroy_bioset(void)
{
        bioset_exit(&f2fs_bioset);
}

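/*
 * Return true if this page's writeback must be covered by the next
 * checkpoint: meta/node pages, directory data, and atomic-file, quota, or
 * GC-ing data pages.  Compressed pages are excluded since they complete
 * through their own writeback path.
 */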
static bool __is_cp_guaranteed(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;
        struct f2fs_sb_info *sbi;

        if (!mapping)
                return false;

        inode = mapping->host;
        sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_META_INO(sbi) ||
                        inode->i_ino == F2FS_NODE_INO(sbi) ||
                        S_ISDIR(inode->i_mode))
                return true;

        if (f2fs_is_compressed_page(page))
                return false;
        if ((S_ISREG(inode->i_mode) &&
                        (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
                        page_private_gcing(page))
                return true;
        return false;
}

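/*
 * Map a page under read I/O to the counter used for in-flight read
 * accounting: meta, node, or (by default) data reads.
 */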
static enum count_type __read_io_type(struct page *page)
{
        struct address_space *mapping = page_file_mapping(page);

        if (mapping) {
                struct inode *inode = mapping->host;
                struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                if (inode->i_ino == F2FS_META_INO(sbi))
                        return F2FS_RD_META;

                if (inode->i_ino == F2FS_NODE_INO(sbi))
                        return F2FS_RD_NODE;
        }
        return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
        STEP_DECRYPT    = 1 << 0,
#else
        STEP_DECRYPT    = 0,    /* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
        STEP_DECOMPRESS = 1 << 1,
#else
        STEP_DECOMPRESS = 0,    /* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
        STEP_VERITY     = 1 << 2,
#else
        STEP_VERITY     = 0,    /* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
        struct bio *bio;
        struct f2fs_sb_info *sbi;
        struct work_struct work;
        unsigned int enabled_steps;
        block_t fs_blkaddr;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        /*
         * Update and unlock the bio's pagecache pages, and put the
         * decompression context for any compressed pages.
         */
        bio_for_each_segment_all(bv, bio, iter_all) {
                struct page *page = bv->bv_page;

                if (f2fs_is_compressed_page(page)) {
                        if (bio->bi_status)
                                f2fs_end_read_compressed_page(page, true, 0);
                        f2fs_put_page_dic(page);
                        continue;
                }

                /* PG_error was set if decryption or verity failed. */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will re-read again later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                dec_page_count(F2FS_P_SB(page), __read_io_type(page));
                unlock_page(page);
        }

        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;
        bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

        /*
         * fsverity_verify_bio() may call readpages() again, and while verity
         * will be disabled for this, decryption and/or decompression may still
         * be needed, resulting in another bio_post_read_ctx being allocated.
         * So to prevent deadlocks we need to release the current ctx to the
         * mempool first.  This assumes that verity is the last post-read step.
         */
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

        /*
         * Verify the bio's pages with fs-verity.  Exclude compressed pages,
         * as those were handled separately by f2fs_end_read_compressed_page().
         */
        if (may_have_compressed_pages) {
                struct bio_vec *bv;
                struct bvec_iter_all iter_all;

                bio_for_each_segment_all(bv, bio, iter_all) {
                        struct page *page = bv->bv_page;

                        if (!f2fs_is_compressed_page(page) &&
                            !PageError(page) && !fsverity_verify_page(page))
                                SetPageError(page);
                }
        } else {
                fsverity_verify_bio(bio);
        }

        f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
        struct bio_post_read_ctx *ctx = bio->bi_private;

        if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
                INIT_WORK(&ctx->work, f2fs_verify_bio);
                fsverity_enqueue_verify_work(&ctx->work);
        } else {
                f2fs_finish_read_bio(bio);
        }
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;
        bool all_compressed = true;
        block_t blkaddr = ctx->fs_blkaddr;

        bio_for_each_segment_all(bv, ctx->bio, iter_all) {
                struct page *page = bv->bv_page;

                /* PG_error was set if decryption failed. */
                if (f2fs_is_compressed_page(page))
                        f2fs_end_read_compressed_page(page, PageError(page),
                                                blkaddr);
                else
                        all_compressed = false;

                blkaddr++;
        }

        /*
         * Optimization: if all the bio's pages are compressed, then scheduling
         * the per-bio verity work is unnecessary, as verity will be fully
         * handled at the compression cluster level.
         */
        if (all_compressed)
                ctx->enabled_steps &= ~STEP_VERITY;
}

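/*
 * Run the enabled post-read steps in order (decryption first, then
 * decompression) off the bio completion path, and finally hand the bio to
 * fs-verity or finish it.
 */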
static void f2fs_post_read_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        if (ctx->enabled_steps & STEP_DECRYPT)
                fscrypt_decrypt_bio(ctx->bio);

        if (ctx->enabled_steps & STEP_DECOMPRESS)
                f2fs_handle_step_decompress(ctx);

        f2fs_verify_and_finish_bio(ctx->bio);
}

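/*
 * Read completion handler.  Decryption and decompression are deferred to the
 * per-sb post_read workqueue since they may sleep; bios that need only
 * verity (or nothing) are handed off directly.
 */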
static void f2fs_read_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
        struct bio_post_read_ctx *ctx;

        iostat_update_and_unbind_ctx(bio, 0);
        ctx = bio->bi_private;

        if (time_to_inject(sbi, FAULT_READ_IO)) {
                f2fs_show_injection_info(sbi, FAULT_READ_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        if (bio->bi_status) {
                f2fs_finish_read_bio(bio);
                return;
        }

        if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
                INIT_WORK(&ctx->work, f2fs_post_read_work);
                queue_work(ctx->sbi->post_read_wq, &ctx->work);
        } else {
                f2fs_verify_and_finish_bio(bio);
        }
}

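/*
 * Write completion handler: release dummy pages used for IO alignment,
 * finalize encryption bounce pages, propagate errors to the mapping (and
 * stop checkpointing when checkpointed data fails), then drop writeback
 * state for each page.
 */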
static void f2fs_write_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        iostat_update_and_unbind_ctx(bio, 1);
        sbi = bio->bi_private;

        if (time_to_inject(sbi, FAULT_WRITE_IO)) {
                f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                enum count_type type = WB_DATA_TYPE(page);

                if (page_private_dummy(page)) {
                        clear_page_private_dummy(page);
                        unlock_page(page);
                        mempool_free(page, sbi->write_io_dummy);

                        if (unlikely(bio->bi_status))
                                f2fs_stop_checkpoint(sbi, true);
                        continue;
                }

                fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
                if (f2fs_is_compressed_page(page)) {
                        f2fs_compress_write_end_io(bio, page);
                        continue;
                }
#endif

                if (unlikely(bio->bi_status)) {
                        mapping_set_error(page->mapping, -EIO);
                        if (type == F2FS_WB_CP_DATA)
                                f2fs_stop_checkpoint(sbi, true);
                }

                f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
                                        page->index != nid_of_node(page));

                dec_page_count(sbi, type);
                if (f2fs_in_warm_node_list(sbi, page))
                        f2fs_del_fsync_node_entry(sbi, page);
                clear_page_private_gcing(page);
                end_page_writeback(page);
        }
        if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
                                wq_has_sleeper(&sbi->cp_wait))
                wake_up(&sbi->cp_wait);

        bio_put(bio);
}

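/*
 * Translate a filesystem block address into the backing block device.  On a
 * multi-device mount, @blk_addr is rebased to the chosen device; when @bio
 * is given, its device and start sector are set accordingly.
 */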
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        int i;

        if (f2fs_is_multi_device(sbi)) {
                for (i = 0; i < sbi->s_ndevs; i++) {
                        if (FDEV(i).start_blk <= blk_addr &&
                            FDEV(i).end_blk >= blk_addr) {
                                blk_addr -= FDEV(i).start_blk;
                                bdev = FDEV(i).bdev;
                                break;
                        }
                }
        }
        if (bio) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        }
        return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return 0;

        for (i = 0; i < sbi->s_ndevs; i++)
                if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
                        return i;
        return 0;
}

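/*
 * Allocate a bio from f2fs's private bio_set and initialize its target
 * device, end_io callback, and (for writes) the write hint derived from the
 * page type and temperature.
 */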
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

        f2fs_target_device(sbi, fio->new_blkaddr, bio);
        if (is_read_io(fio->op)) {
                bio->bi_end_io = f2fs_read_end_io;
                bio->bi_private = NULL;
        } else {
                bio->bi_end_io = f2fs_write_end_io;
                bio->bi_private = sbi;
                bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
                                                fio->type, fio->temp);
        }
        iostat_alloc_and_bind_ctx(sbi, bio, NULL);

        if (fio->io_wbc)
                wbc_init_bio(fio->io_wbc, bio);

        return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                                  pgoff_t first_idx,
                                  const struct f2fs_io_info *fio,
                                  gfp_t gfp_mask)
{
        /*
         * The f2fs garbage collector sets ->encrypted_page when it wants to
         * read/write raw data without encryption.
         */
        if (!fio || !fio->encrypted_page)
                fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                                     pgoff_t next_idx,
                                     const struct f2fs_io_info *fio)
{
        /*
         * The f2fs garbage collector sets ->encrypted_page when it wants to
         * read/write raw data without encryption.
         */
        if (fio && fio->encrypted_page)
                return !bio_has_crypt_ctx(bio);

        return fscrypt_mergeable_bio(bio, inode, next_idx);
}

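/*
 * Submit a bio, padding DATA/NODE writes with zeroed dummy pages up to the
 * next F2FS_IO_SIZE boundary when IO alignment is enabled.  Padding a NODE
 * bio breaks the next-block-address chain, so a checkpoint is forced via
 * SBI_NEED_CP in that case.
 */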
static inline void __submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        if (!is_read_io(bio_op(bio))) {
                unsigned int start;

                if (type != DATA && type != NODE)
                        goto submit_io;

                if (f2fs_lfs_mode(sbi) && current->plug)
                        blk_finish_plug(current->plug);

                if (!F2FS_IO_ALIGNED(sbi))
                        goto submit_io;

                start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
                start %= F2FS_IO_SIZE(sbi);

                if (start == 0)
                        goto submit_io;

                /* fill dummy pages */
                for (; start < F2FS_IO_SIZE(sbi); start++) {
                        struct page *page =
                                mempool_alloc(sbi->write_io_dummy,
                                              GFP_NOIO | __GFP_NOFAIL);
                        f2fs_bug_on(sbi, !page);

                        lock_page(page);

                        zero_user_segment(page, 0, PAGE_SIZE);
                        set_page_private_dummy(page);

                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                                f2fs_bug_on(sbi, 1);
                }
                /*
                 * In the NODE case, we lose next block address chain. So, we
                 * need to do checkpoint in f2fs_sync_file.
                 */
                if (type == NODE)
                        set_sbi_flag(sbi, SBI_NEED_CP);
        }
submit_io:
        if (is_read_io(bio_op(bio)))
                trace_f2fs_submit_read_bio(sbi->sb, type, bio);
        else
                trace_f2fs_submit_write_bio(sbi->sb, type, bio);

        iostat_update_submit_ctx(bio, type);
        submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        __submit_bio(sbi, bio, type);
}

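/*
 * Attach REQ_META/REQ_FUA to the request according to the per-temperature
 * bits configured in sbi->data_io_flag or sbi->node_io_flag (see the bit
 * layout documented below).
 */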
static void __attach_io_flag(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
        unsigned int io_flag, fua_flag, meta_flag;

        if (fio->type == DATA)
                io_flag = sbi->data_io_flag;
        else if (fio->type == NODE)
                io_flag = sbi->node_io_flag;
        else
                return;

        fua_flag = io_flag & temp_mask;
        meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

        /*
         * data/node io flag bits per temp:
         *      REQ_META     |      REQ_FUA      |
         *    5 |    4 |   3 |    2 |    1 |   0 |
         * Cold | Warm | Hot | Cold | Warm | Hot |
         */
        if ((1 << fio->temp) & meta_flag)
                fio->op_flags |= REQ_META;
        if ((1 << fio->temp) & fua_flag)
                fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
        struct f2fs_io_info *fio = &io->fio;

        if (!io->bio)
                return;

        __attach_io_flag(fio);
        bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

        if (is_read_io(fio->op))
                trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
        else
                trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

        __submit_bio(io->sbi, io->bio, fio->type);
        io->bio = NULL;
}

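/*
 * Check whether @bio already carries a page of interest: one belonging to
 * @inode, equal to @page, or written from node @ino.  With no filter
 * (inode/page/ino all unset), any non-empty bio matches.  Bounce and
 * compress control pages are resolved back to their pagecache pages first.
 */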
static bool __has_merged_page(struct bio *bio, struct inode *inode,
                                                struct page *page, nid_t ino)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (!bio)
                return false;

        if (!inode && !page && !ino)
                return true;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *target = bvec->bv_page;

                if (fscrypt_is_bounce_page(target)) {
                        target = fscrypt_pagecache_page(target);
                        if (IS_ERR(target))
                                continue;
                }
                if (f2fs_is_compressed_page(target)) {
                        target = f2fs_compress_control_page(target);
                        if (IS_ERR(target))
                                continue;
                }

                if (inode && inode == target->mapping->host)
                        return true;
                if (page && page == target)
                        return true;
                if (ino && ino == ino_of_node(target))
                        return true;
        }

        return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
                                enum page_type type, enum temp_type temp)
{
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

        down_write(&io->io_rwsem);

        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                io->fio.op = REQ_OP_WRITE;
                io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
                if (!test_opt(sbi, NOBARRIER))
                        io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
        }
        __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type, bool force)
{
        enum temp_type temp;
        bool ret = true;

        for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
                if (!force) {
                        enum page_type btype = PAGE_TYPE_OF_BIO(type);
                        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

                        down_read(&io->io_rwsem);
                        ret = __has_merged_page(io->bio, inode, page, ino);
                        up_read(&io->io_rwsem);
                }
                if (ret)
                        __f2fs_submit_merged_write(sbi, type, temp);

                /* TODO: use HOT temp only for meta pages now. */
                if (type >= META)
                        break;
        }
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
        __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type)
{
        __submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
        f2fs_submit_merged_write(sbi, DATA);
        f2fs_submit_merged_write(sbi, NODE);
        f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        fio->is_por ? META_POR : (__is_meta_io(fio) ?
                        META_GENERIC : DATA_GENERIC_ENHANCE)))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);

        /* Allocate a new bio */
        bio = __bio_alloc(fio, 1);

        f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                               fio->page->index, fio, GFP_NOIO);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }

        if (fio->io_wbc && !is_read_io(fio->op))
                wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

        __attach_io_flag(fio);
        bio_set_op_attrs(bio, fio->op, fio->op_flags);

        inc_page_count(fio->sbi, is_read_io(fio->op) ?
                        __read_io_type(page) : WB_DATA_TYPE(fio->page));

        __submit_bio(fio->sbi, bio, fio->type);
        return 0;
}

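/*
 * A page may be merged into @bio only if the bio stays under the optional
 * max_io_bytes cap, the new block is physically contiguous with the last
 * one, and both map to the same backing device.
 */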
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                block_t last_blkaddr, block_t cur_blkaddr)
{
        if (unlikely(sbi->max_io_bytes &&
                        bio->bi_iter.bi_size >= sbi->max_io_bytes))
                return false;
        if (last_blkaddr + 1 != cur_blkaddr)
                return false;
        return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
                                                struct f2fs_io_info *fio)
{
        if (io->fio.op != fio->op)
                return false;
        return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                        struct f2fs_bio_info *io,
                                        struct f2fs_io_info *fio,
                                        block_t last_blkaddr,
                                        block_t cur_blkaddr)
{
        if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
                unsigned int filled_blocks =
                                F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
                unsigned int io_size = F2FS_IO_SIZE(sbi);
                unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

                /*
                 * The bio is aligned to the IO unit, but the remaining
                 * bvec slots cannot hold another full unit.
                 */
                if (!(filled_blocks % io_size) && left_vecs < io_size)
                        return false;
        }
        if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
                return false;
        return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
                                struct page *page, enum temp_type temp)
{
        struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
        struct bio_entry *be;

        be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
        be->bio = bio;
        bio_get(bio);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
                f2fs_bug_on(sbi, 1);

        down_write(&io->bio_list_lock);
        list_add_tail(&be->list, &io->bio_list);
        up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
        list_del(&be->list);
        kmem_cache_free(bio_entry_slab, be);
}

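/*
 * Try to merge @page into an in-flight in-place-update bio tracked on the
 * per-temperature bio lists.  If the bio is found but the page can't be
 * merged (crypto mismatch or bio full), the bio is submitted and -EAGAIN is
 * returned so the caller allocates a fresh one.
 */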
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
                                                        struct page *page)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum temp_type temp;
        bool found = false;
        int ret = -EAGAIN;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (be->bio != *bio)
                                continue;

                        found = true;

                        f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
                                                            *fio->last_block,
                                                            fio->new_blkaddr));
                        if (f2fs_crypt_mergeable_bio(*bio,
                                        fio->page->mapping->host,
                                        fio->page->index, fio) &&
                            bio_add_page(*bio, page, PAGE_SIZE, 0) ==
                                        PAGE_SIZE) {
                                ret = 0;
                                break;
                        }

                        /* page can't be merged into bio; submit the bio */
                        del_bio_entry(be);
                        __submit_bio(sbi, *bio, DATA);
                        break;
                }
                up_write(&io->bio_list_lock);
        }

        if (ret) {
                bio_put(*bio);
                *bio = NULL;
        }

        return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
                                        struct bio **bio, struct page *page)
{
        enum temp_type temp;
        bool found = false;
        struct bio *target = bio ? *bio : NULL;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                if (list_empty(head))
                        continue;

                down_read(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found)
                                break;
                }
                up_read(&io->bio_list_lock);

                if (!found)
                        continue;

                found = false;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found) {
                                target = be->bio;
                                del_bio_entry(be);
                                break;
                        }
                }
                up_write(&io->bio_list_lock);
        }

        if (found)
                __submit_bio(sbi, target, DATA);
        if (bio && *bio) {
                bio_put(*bio);
                *bio = NULL;
        }
}

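/*
 * In-place-update write path: append the page to the caller's cached bio if
 * the block is contiguous, otherwise submit the old bio and start a new one.
 * A new bio is registered on the bio list so other writers can merge into
 * or flush it.
 */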
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio = *fio->bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);

        if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
                                                fio->new_blkaddr))
                f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
        if (!bio) {
                bio = __bio_alloc(fio, BIO_MAX_VECS);
                __attach_io_flag(fio);
                f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                                       fio->page->index, fio, GFP_NOIO);
                bio_set_op_attrs(bio, fio->op, fio->op_flags);

                add_bio_entry(fio->sbi, bio, page, fio->temp);
        } else {
                if (add_ipu_page(fio, &bio, page))
                        goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

        inc_page_count(fio->sbi, WB_DATA_TYPE(page));

        *fio->last_block = fio->new_blkaddr;
        *fio->bio = bio;

        return 0;
}

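/*
 * Main out-of-place write path: merge the page into the per-(type, temp)
 * write bio, submitting the cached bio first whenever block contiguity, op
 * flags, or the crypto context prevent merging.
 */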
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
        struct page *bio_page;

        f2fs_bug_on(sbi, is_read_io(fio->op));

        down_write(&io->io_rwsem);
next:
        if (fio->in_list) {
                spin_lock(&io->io_lock);
                if (list_empty(&io->io_list)) {
                        spin_unlock(&io->io_lock);
                        goto out;
                }
                fio = list_first_entry(&io->io_list,
                                                struct f2fs_io_info, list);
                list_del(&fio->list);
                spin_unlock(&io->io_lock);
        }

        verify_fio_blkaddr(fio);

        if (fio->encrypted_page)
                bio_page = fio->encrypted_page;
        else if (fio->compressed_page)
                bio_page = fio->compressed_page;
        else
                bio_page = fio->page;

        /* set submitted = true as a return value */
        fio->submitted = true;

        inc_page_count(sbi, WB_DATA_TYPE(bio_page));

        if (io->bio &&
            (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
                              fio->new_blkaddr) ||
             !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio)))
                __submit_merged_bio(io);
alloc_new:
        if (io->bio == NULL) {
                if (F2FS_IO_ALIGNED(sbi) &&
                                (fio->type == DATA || fio->type == NODE) &&
                                fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
                        dec_page_count(sbi, WB_DATA_TYPE(bio_page));
                        fio->retry = true;
                        goto skip;
                }
                io->bio = __bio_alloc(fio, BIO_MAX_VECS);
                f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio, GFP_NOIO);
                io->fio = *fio;
        }

        if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

        io->last_block_in_bio = fio->new_blkaddr;

        trace_f2fs_submit_page_write(fio->page, fio);
skip:
        if (fio->in_list)
                goto next;
out:
        if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
                                !f2fs_is_checkpoint_ready(sbi))
                __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

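/*
 * Allocate a read bio targeting @blkaddr and set up its post-read context:
 * decryption, verity, and, for compressed files, a ctx that lets the caller
 * enable STEP_DECOMPRESS later.
 */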
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
                                      unsigned nr_pages, unsigned op_flag,
                                      pgoff_t first_idx, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
        struct bio_post_read_ctx *ctx = NULL;
        unsigned int post_read_steps = 0;

        bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
                               bio_max_segs(nr_pages), &f2fs_bioset);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

        f2fs_target_device(sbi, blkaddr, bio);
        bio->bi_end_io = f2fs_read_end_io;
        bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

        if (fscrypt_inode_uses_fs_layer_crypto(inode))
                post_read_steps |= STEP_DECRYPT;

        if (f2fs_need_verity(inode, first_idx))
                post_read_steps |= STEP_VERITY;

        /*
         * STEP_DECOMPRESS is handled specially, since a compressed file might
         * contain both compressed and uncompressed clusters.  We'll allocate a
         * bio_post_read_ctx if the file is compressed, but the caller is
         * responsible for enabling STEP_DECOMPRESS if it's actually needed.
         */

        if (post_read_steps || f2fs_compressed_file(inode)) {
                /* Due to the mempool, this never fails. */
                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
                ctx->bio = bio;
                ctx->sbi = sbi;
                ctx->enabled_steps = post_read_steps;
                ctx->fs_blkaddr = blkaddr;
                bio->bi_private = ctx;
        }
        iostat_alloc_and_bind_ctx(sbi, bio, ctx);

        return bio;
}

/* Submit a single-page read; the post-read steps handle decryption etc. */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
                                 block_t blkaddr, int op_flags, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;

        bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
                                        page->index, for_write);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, blkaddr);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
        ClearPageError(page);
        inc_page_count(sbi, F2FS_RD_DATA);
        f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
        __submit_bio(sbi, bio, DATA);
        return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
        struct f2fs_node *rn = F2FS_NODE(dn->node_page);
        __le32 *addr_array;
        int base = 0;

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
        __set_data_blkaddr(dn);
        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
        dn->data_blkaddr = blkaddr;
        f2fs_set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        int err;

        if (!count)
                return 0;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;
        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

        trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
                                                dn->ofs_in_node, count);

        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

        for (; count > 0; dn->ofs_in_node++) {
                block_t blkaddr = f2fs_data_blkaddr(dn);

                if (blkaddr == NULL_ADDR) {
                        dn->data_blkaddr = NEW_ADDR;
                        __set_data_blkaddr(dn);
                        count--;
                }
        }

        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
        return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
        unsigned int ofs_in_node = dn->ofs_in_node;
        int ret;

        ret = f2fs_reserve_new_blocks(dn, 1);
        dn->ofs_in_node = ofs_in_node;
        return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
        bool need_put = dn->inode_page ? false : true;
        int err;

        err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
        if (err)
                return err;

        if (dn->data_blkaddr == NULL_ADDR)
                err = f2fs_reserve_new_block(dn);
        if (err || need_put)
                f2fs_put_dnode(dn);
        return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
        struct extent_info ei = {0, };
        struct inode *inode = dn->inode;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn->data_blkaddr = ei.blk + index - ei.fofs;
                return 0;
        }

        return f2fs_reserve_block(dn, index);
}

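/*
 * Find or create the data page at @index and start a read for it if needed.
 * Pages already up to date (or newly zeroed NEW_ADDR blocks) are returned
 * unlocked; otherwise the page is returned with read I/O in flight and is
 * unlocked by the read completion path.
 */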
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
                                                int op_flags, bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, };
        int err;

        page = f2fs_grab_cache_page(mapping, index, for_write);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ)) {
                        err = -EFSCORRUPTED;
                        goto put_err;
                }
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_err;
        f2fs_put_dnode(&dn);

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                err = -ENOENT;
                goto put_err;
        }
        if (dn.data_blkaddr != NEW_ADDR &&
                        !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                                dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE)) {
                err = -EFSCORRUPTED;
                goto put_err;
        }
got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        /*
         * A new dentry page is allocated but not able to be written, since its
         * new inode page couldn't be allocated due to -ENOSPC.
         * In such a case, its blkaddr stays NEW_ADDR.
         * See f2fs_add_link -> f2fs_get_new_data_page ->
         * f2fs_init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
                unlock_page(page);
                return page;
        }

        err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
                                                op_flags, for_write);
        if (err)
                goto put_err;
        return page;

put_err:
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        page = f2fs_get_read_data_page(inode, index, 0, false);
        if (IS_ERR(page))
                return page;

        if (PageUptodate(page))
                return page;

        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 0);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * Return an error when a hole is accessed: the callers (directory code and
 * GC) need to be able to tell whether the page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
                                                        bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
repeat:
        page = f2fs_get_read_data_page(inode, index, 0, for_write);
        if (IS_ERR(page))
                return page;

        /* wait for read completion */
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release an rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
                struct page *ipage, pgoff_t index, bool new_i_size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page) {
                /*
                 * before exiting, we should make sure ipage will be released
                 * if any error occurs.
                 */
                f2fs_put_page(ipage, 1);
                return ERR_PTR(-ENOMEM);
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, index);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        if (!ipage)
                f2fs_put_dnode(&dn);

        if (PageUptodate(page))
                goto got_it;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);

                /* if ipage exists, blkaddr should be NEW_ADDR */
                f2fs_bug_on(F2FS_I_SB(inode), ipage);
                page = f2fs_get_lock_data_page(inode, index, true);
                if (IS_ERR(page))
                        return page;
        }
got_it:
        if (new_i_size && i_size_read(inode) <
                                ((loff_t)(index + 1) << PAGE_SHIFT))
                f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
        return page;
}

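/*
 * Allocate one data block for @dn (used by direct I/O and preallocation):
 * charge the block quota if the slot was a hole, pick a new block in the
 * requested segment type, and update the dnode and extent cache.
 */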
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        block_t old_blkaddr;
        blkcnt_t count = 1;
        int err;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;

        err = f2fs_get_node_info(sbi, dn->nid, &ni);
        if (err)
                return err;

        dn->data_blkaddr = f2fs_data_blkaddr(dn);
        if (dn->data_blkaddr != NULL_ADDR)
                goto alloc;

        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

alloc:
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
        old_blkaddr = dn->data_blkaddr;
        f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
                                &sum, seg_type, NULL);
        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
                invalidate_mapping_pages(META_MAPPING(sbi),
                                        old_blkaddr, old_blkaddr);
                f2fs_invalidate_compress_page(sbi, old_blkaddr);
        }
        f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

        /*
         * i_size will be updated by direct_IO. Otherwise, we'll get stale
         * data from unwritten block via dio_read.
         */
        return 0;
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        struct f2fs_map_blocks map;
        int flag;
        int err = 0;
        bool direct_io = iocb->ki_flags & IOCB_DIRECT;

        map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
        map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
        if (map.m_len > map.m_lblk)
                map.m_len -= map.m_lblk;
        else
                map.m_len = 0;

        map.m_next_pgofs = NULL;
        map.m_next_extent = NULL;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = true;

        if (direct_io) {
                map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
                flag = f2fs_force_buffered_io(inode, iocb, from) ?
                                        F2FS_GET_BLOCK_PRE_AIO :
                                        F2FS_GET_BLOCK_PRE_DIO;
                goto map_blocks;
        }
        if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }
        if (f2fs_has_inline_data(inode))
                return err;

        flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
        err = f2fs_map_blocks(inode, &map, 1, flag);
        if (map.m_len > 0 && err == -ENOSPC) {
                if (!direct_io)
                        set_inode_flag(inode, FI_NO_PREALLOC);
                err = 0;
        }
        return err;
}

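/*
 * F2FS_GET_BLOCK_PRE_AIO preallocation serializes against node_change only,
 * while the other mapping flags take the global cp_rwsem through
 * f2fs_lock_op()/f2fs_unlock_op().
 */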
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
                if (lock)
                        down_read(&sbi->node_change);
                else
                        up_read(&sbi->node_change);
        } else {
                if (lock)
                        f2fs_lock_op(sbi);
                else
                        f2fs_unlock_op(sbi);
        }
}

/*
 * f2fs_map_blocks() tries to find or build a mapping that covers contiguous
 * logical blocks with physical blocks, and returns such info via the
 * f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                                                int create, int flag)
{
        unsigned int maxblocks = map->m_len;
        struct dnode_of_data dn;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
        pgoff_t pgofs, end_offset, end;
        int err = 0, ofs = 1;
        unsigned int ofs_in_node, last_ofs_in_node;
        blkcnt_t prealloc;
        struct extent_info ei = {0, };
        block_t blkaddr;
        unsigned int start_pgofs;

        if (!maxblocks)
                return 0;

        map->m_len = 0;
        map->m_flags = 0;

        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
        end = pgofs + maxblocks;

        if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
                                                        map->m_may_create)
                        goto next_dnode;

                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                if (map->m_next_extent)
                        *map->m_next_extent = pgofs + map->m_len;

                /* for hardware encryption, but to avoid potential issues in the future */
                if (flag == F2FS_GET_BLOCK_DIO)
                        f2fs_wait_on_block_writeback_range(inode,
                                                map->m_pblk, map->m_len);
                goto out;
        }

next_dnode:
        if (map->m_may_create)
                f2fs_do_map_lock(sbi, flag, true);

        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
                if (flag == F2FS_GET_BLOCK_BMAP)
                        map->m_pblk = 0;

                if (err == -ENOENT) {
                        /*
                         * There is one exceptional case: read_node_page()
                         * may return -ENOENT because the filesystem has been
                         * shut down or hit cp_error, so force the error
                         * number to EIO in that case.
                         */
1515                        if (map->m_may_create &&
1516                                (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1517                                f2fs_cp_error(sbi))) {
1518                                err = -EIO;
1519                                goto unlock_out;
1520                        }
1521
1522                        err = 0;
1523                        if (map->m_next_pgofs)
1524                                *map->m_next_pgofs =
1525                                        f2fs_get_next_page_offset(&dn, pgofs);
1526                        if (map->m_next_extent)
1527                                *map->m_next_extent =
1528                                        f2fs_get_next_page_offset(&dn, pgofs);
1529                }
1530                goto unlock_out;
1531        }
1532
1533        start_pgofs = pgofs;
1534        prealloc = 0;
1535        last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1536        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1537
1538next_block:
1539        blkaddr = f2fs_data_blkaddr(&dn);
1540
1541        if (__is_valid_data_blkaddr(blkaddr) &&
1542                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1543                err = -EFSCORRUPTED;
1544                goto sync_out;
1545        }
1546
1547        if (__is_valid_data_blkaddr(blkaddr)) {
1548                /* use out-of-place updates for direct IO under LFS mode */
1549                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1550                                                        map->m_may_create) {
1551                        err = __allocate_data_block(&dn, map->m_seg_type);
1552                        if (err)
1553                                goto sync_out;
1554                        blkaddr = dn.data_blkaddr;
1555                        set_inode_flag(inode, FI_APPEND_WRITE);
1556                }
1557        } else {
1558                if (create) {
1559                        if (unlikely(f2fs_cp_error(sbi))) {
1560                                err = -EIO;
1561                                goto sync_out;
1562                        }
1563                        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1564                                if (blkaddr == NULL_ADDR) {
1565                                        prealloc++;
1566                                        last_ofs_in_node = dn.ofs_in_node;
1567                                }
1568                        } else {
1569                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1570                                        flag != F2FS_GET_BLOCK_DIO);
1571                                err = __allocate_data_block(&dn,
1572                                                        map->m_seg_type);
1573                                if (!err)
1574                                        set_inode_flag(inode, FI_APPEND_WRITE);
1575                        }
1576                        if (err)
1577                                goto sync_out;
1578                        map->m_flags |= F2FS_MAP_NEW;
1579                        blkaddr = dn.data_blkaddr;
1580                } else {
1581                        if (f2fs_compressed_file(inode) &&
1582                                        f2fs_sanity_check_cluster(&dn) &&
1583                                        (flag != F2FS_GET_BLOCK_FIEMAP ||
1584                                        IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
1585                                err = -EFSCORRUPTED;
1586                                goto sync_out;
1587                        }
1588                        if (flag == F2FS_GET_BLOCK_BMAP) {
1589                                map->m_pblk = 0;
1590                                goto sync_out;
1591                        }
1592                        if (flag == F2FS_GET_BLOCK_PRECACHE)
1593                                goto sync_out;
1594                        if (flag == F2FS_GET_BLOCK_FIEMAP &&
1595                                                blkaddr == NULL_ADDR) {
1596                                if (map->m_next_pgofs)
1597                                        *map->m_next_pgofs = pgofs + 1;
1598                                goto sync_out;
1599                        }
1600                        if (flag != F2FS_GET_BLOCK_FIEMAP) {
1601                                /* for defragment case */
1602                                if (map->m_next_pgofs)
1603                                        *map->m_next_pgofs = pgofs + 1;
1604                                goto sync_out;
1605                        }
1606                }
1607        }
1608
1609        if (flag == F2FS_GET_BLOCK_PRE_AIO)
1610                goto skip;
1611
1612        if (map->m_len == 0) {
1613                /* a preallocated, unwritten block should be mapped for fiemap. */
1614                if (blkaddr == NEW_ADDR)
1615                        map->m_flags |= F2FS_MAP_UNWRITTEN;
1616                map->m_flags |= F2FS_MAP_MAPPED;
1617
1618                map->m_pblk = blkaddr;
1619                map->m_len = 1;
1620        } else if ((map->m_pblk != NEW_ADDR &&
1621                        blkaddr == (map->m_pblk + ofs)) ||
1622                        (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1623                        flag == F2FS_GET_BLOCK_PRE_DIO) {
1624                ofs++;
1625                map->m_len++;
1626        } else {
1627                goto sync_out;
1628        }
1629
1630skip:
1631        dn.ofs_in_node++;
1632        pgofs++;
1633
1634        /* preallocate blocks in batch for one dnode page */
1635        if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1636                        (pgofs == end || dn.ofs_in_node == end_offset)) {
1637
1638                dn.ofs_in_node = ofs_in_node;
1639                err = f2fs_reserve_new_blocks(&dn, prealloc);
1640                if (err)
1641                        goto sync_out;
1642
1643                map->m_len += dn.ofs_in_node - ofs_in_node;
1644                if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1645                        err = -ENOSPC;
1646                        goto sync_out;
1647                }
1648                dn.ofs_in_node = end_offset;
1649        }
1650
1651        if (pgofs >= end)
1652                goto sync_out;
1653        else if (dn.ofs_in_node < end_offset)
1654                goto next_block;
1655
1656        if (flag == F2FS_GET_BLOCK_PRECACHE) {
1657                if (map->m_flags & F2FS_MAP_MAPPED) {
1658                        unsigned int ofs = start_pgofs - map->m_lblk;
1659
1660                        f2fs_update_extent_cache_range(&dn,
1661                                start_pgofs, map->m_pblk + ofs,
1662                                map->m_len - ofs);
1663                }
1664        }
1665
1666        f2fs_put_dnode(&dn);
1667
1668        if (map->m_may_create) {
1669                f2fs_do_map_lock(sbi, flag, false);
1670                f2fs_balance_fs(sbi, dn.node_changed);
1671        }
1672        goto next_dnode;
1673
1674sync_out:
1675
1676        /* for hardware encryption, but to avoid potential issues in the future */
1677        if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1678                f2fs_wait_on_block_writeback_range(inode,
1679                                                map->m_pblk, map->m_len);
1680
1681        if (flag == F2FS_GET_BLOCK_PRECACHE) {
1682                if (map->m_flags & F2FS_MAP_MAPPED) {
1683                        unsigned int ofs = start_pgofs - map->m_lblk;
1684
1685                        f2fs_update_extent_cache_range(&dn,
1686                                start_pgofs, map->m_pblk + ofs,
1687                                map->m_len - ofs);
1688                }
1689                if (map->m_next_extent)
1690                        *map->m_next_extent = pgofs + 1;
1691        }
1692        f2fs_put_dnode(&dn);
1693unlock_out:
1694        if (map->m_may_create) {
1695                f2fs_do_map_lock(sbi, flag, false);
1696                f2fs_balance_fs(sbi, dn.node_changed);
1697        }
1698out:
1699        trace_f2fs_map_blocks(inode, map, err);
1700        return err;
1701}
1702
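/*
 * Illustrative sketch (not part of the original source): a minimal read-only
 * use of f2fs_map_blocks(), mirroring f2fs_overwrite_io() below.  The helper
 * name is hypothetical and exists only for demonstration.
 */
static inline bool f2fs_example_lblk_is_mapped(struct inode *inode,
                                                pgoff_t lblk)
{
        struct f2fs_map_blocks map;

        memset(&map, 0, sizeof(map));
        map.m_lblk = lblk;              /* first logical block to look up */
        map.m_len = 1;                  /* probe a single block */
        map.m_next_pgofs = NULL;        /* next hole offset is not needed */
        map.m_next_extent = NULL;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = false;       /* lookup only, no allocation */

        /* create == 0 with F2FS_GET_BLOCK_DEFAULT performs a plain lookup */
        if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
                return false;
        return map.m_flags & F2FS_MAP_MAPPED;
}
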
1703bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1704{
1705        struct f2fs_map_blocks map;
1706        block_t last_lblk;
1707        int err;
1708
1709        if (pos + len > i_size_read(inode))
1710                return false;
1711
1712        map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1713        map.m_next_pgofs = NULL;
1714        map.m_next_extent = NULL;
1715        map.m_seg_type = NO_CHECK_TYPE;
1716        map.m_may_create = false;
1717        last_lblk = F2FS_BLK_ALIGN(pos + len);
1718
1719        while (map.m_lblk < last_lblk) {
1720                map.m_len = last_lblk - map.m_lblk;
1721                err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1722                if (err || map.m_len == 0)
1723                        return false;
1724                map.m_lblk += map.m_len;
1725        }
1726        return true;
1727}
1728
1729static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1730{
1731        return (bytes >> inode->i_blkbits);
1732}
1733
1734static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1735{
1736        return (blks << inode->i_blkbits);
1737}
1738
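/*
 * Worked example (illustrative): with the default 4KiB block size
 * (inode->i_blkbits == 12), bytes_to_blks(inode, 8192) == 2 and
 * blks_to_bytes(inode, 3) == 12288.
 */
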
1739static int __get_data_block(struct inode *inode, sector_t iblock,
1740                        struct buffer_head *bh, int create, int flag,
1741                        pgoff_t *next_pgofs, int seg_type, bool may_write)
1742{
1743        struct f2fs_map_blocks map;
1744        int err;
1745
1746        map.m_lblk = iblock;
1747        map.m_len = bytes_to_blks(inode, bh->b_size);
1748        map.m_next_pgofs = next_pgofs;
1749        map.m_next_extent = NULL;
1750        map.m_seg_type = seg_type;
1751        map.m_may_create = may_write;
1752
1753        err = f2fs_map_blocks(inode, &map, create, flag);
1754        if (!err) {
1755                map_bh(bh, inode->i_sb, map.m_pblk);
1756                bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1757                bh->b_size = blks_to_bytes(inode, map.m_len);
1758        }
1759        return err;
1760}
1761
1762static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1763                        struct buffer_head *bh_result, int create)
1764{
1765        return __get_data_block(inode, iblock, bh_result, create,
1766                                F2FS_GET_BLOCK_DIO, NULL,
1767                                f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1768                                true);
1769}
1770
1771static int get_data_block_dio(struct inode *inode, sector_t iblock,
1772                        struct buffer_head *bh_result, int create)
1773{
1774        return __get_data_block(inode, iblock, bh_result, create,
1775                                F2FS_GET_BLOCK_DIO, NULL,
1776                                f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1777                                false);
1778}
1779
1780static int f2fs_xattr_fiemap(struct inode *inode,
1781                                struct fiemap_extent_info *fieinfo)
1782{
1783        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1784        struct page *page;
1785        struct node_info ni;
1786        __u64 phys = 0, len;
1787        __u32 flags;
1788        nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1789        int err = 0;
1790
1791        if (f2fs_has_inline_xattr(inode)) {
1792                int offset;
1793
1794                page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1795                                                inode->i_ino, false);
1796                if (!page)
1797                        return -ENOMEM;
1798
1799                err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1800                if (err) {
1801                        f2fs_put_page(page, 1);
1802                        return err;
1803                }
1804
1805                phys = blks_to_bytes(inode, ni.blk_addr);
1806                offset = offsetof(struct f2fs_inode, i_addr) +
1807                                        sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1808                                        get_inline_xattr_addrs(inode));
1809
1810                phys += offset;
1811                len = inline_xattr_size(inode);
1812
1813                f2fs_put_page(page, 1);
1814
1815                flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1816
1817                if (!xnid)
1818                        flags |= FIEMAP_EXTENT_LAST;
1819
1820                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1821                trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1822                if (err || err == 1)
1823                        return err;
1824        }
1825
1826        if (xnid) {
1827                page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1828                if (!page)
1829                        return -ENOMEM;
1830
1831                err = f2fs_get_node_info(sbi, xnid, &ni);
1832                if (err) {
1833                        f2fs_put_page(page, 1);
1834                        return err;
1835                }
1836
1837                phys = blks_to_bytes(inode, ni.blk_addr);
1838                len = inode->i_sb->s_blocksize;
1839
1840                f2fs_put_page(page, 1);
1841
1842                flags = FIEMAP_EXTENT_LAST;
1843        }
1844
1845        if (phys) {
1846                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1847                trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1848        }
1849
1850        return (err < 0 ? err : 0);
1851}
1852
1853static loff_t max_inode_blocks(struct inode *inode)
1854{
1855        loff_t result = ADDRS_PER_INODE(inode);
1856        loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1857
1858        /* two direct node blocks */
1859        result += (leaf_count * 2);
1860
1861        /* two indirect node blocks */
1862        leaf_count *= NIDS_PER_BLOCK;
1863        result += (leaf_count * 2);
1864
1865        /* one double indirect node block */
1866        leaf_count *= NIDS_PER_BLOCK;
1867        result += leaf_count;
1868
1869        return result;
1870}
1871
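/*
 * Worked example (illustrative, assuming the default layout with no extra
 * inode attributes): with ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK == 1018
 * and NIDS_PER_BLOCK == 1018, the result is
 *      923 + 2 * 1018 + 2 * 1018 * 1018 + 1018 * 1018 * 1018
 *      = 1,057,053,439 blocks, i.e. roughly 3.94 TiB of 4KiB blocks.
 */
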
1872int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1873                u64 start, u64 len)
1874{
1875        struct f2fs_map_blocks map;
1876        sector_t start_blk, last_blk;
1877        pgoff_t next_pgofs;
1878        u64 logical = 0, phys = 0, size = 0;
1879        u32 flags = 0;
1880        int ret = 0;
1881        bool compr_cluster = false, compr_appended;
1882        unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1883        unsigned int count_in_cluster = 0;
1884        loff_t maxbytes;
1885
1886        if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1887                ret = f2fs_precache_extents(inode);
1888                if (ret)
1889                        return ret;
1890        }
1891
1892        ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1893        if (ret)
1894                return ret;
1895
1896        inode_lock(inode);
1897
1898        maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1899        if (start > maxbytes) {
1900                ret = -EFBIG;
1901                goto out;
1902        }
1903
1904        if (len > maxbytes || (maxbytes - len) < start)
1905                len = maxbytes - start;
1906
1907        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1908                ret = f2fs_xattr_fiemap(inode, fieinfo);
1909                goto out;
1910        }
1911
1912        if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1913                ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1914                if (ret != -EAGAIN)
1915                        goto out;
1916        }
1917
1918        if (bytes_to_blks(inode, len) == 0)
1919                len = blks_to_bytes(inode, 1);
1920
1921        start_blk = bytes_to_blks(inode, start);
1922        last_blk = bytes_to_blks(inode, start + len - 1);
1923
1924next:
1925        memset(&map, 0, sizeof(map));
1926        map.m_lblk = start_blk;
1927        map.m_len = bytes_to_blks(inode, len);
1928        map.m_next_pgofs = &next_pgofs;
1929        map.m_seg_type = NO_CHECK_TYPE;
1930
1931        if (compr_cluster) {
1932                map.m_lblk += 1;
1933                map.m_len = cluster_size - count_in_cluster;
1934        }
1935
1936        ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
1937        if (ret)
1938                goto out;
1939
1940        /* HOLE */
1941        if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1942                start_blk = next_pgofs;
1943
1944                if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1945                                                max_inode_blocks(inode)))
1946                        goto prep_next;
1947
1948                flags |= FIEMAP_EXTENT_LAST;
1949        }
1950
1951        compr_appended = false;
1952        /* In the case of a compressed cluster, append this to the last extent */
1953        if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
1954                        !(map.m_flags & F2FS_MAP_FLAGS))) {
1955                compr_appended = true;
1956                goto skip_fill;
1957        }
1958
1959        if (size) {
1960                flags |= FIEMAP_EXTENT_MERGED;
1961                if (IS_ENCRYPTED(inode))
1962                        flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1963
1964                ret = fiemap_fill_next_extent(fieinfo, logical,
1965                                phys, size, flags);
1966                trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1967                if (ret)
1968                        goto out;
1969                size = 0;
1970        }
1971
1972        if (start_blk > last_blk)
1973                goto out;
1974
1975skip_fill:
1976        if (map.m_pblk == COMPRESS_ADDR) {
1977                compr_cluster = true;
1978                count_in_cluster = 1;
1979        } else if (compr_appended) {
1980                unsigned int appended_blks = cluster_size -
1981                                                count_in_cluster + 1;
1982                size += blks_to_bytes(inode, appended_blks);
1983                start_blk += appended_blks;
1984                compr_cluster = false;
1985        } else {
1986                logical = blks_to_bytes(inode, start_blk);
1987                phys = __is_valid_data_blkaddr(map.m_pblk) ?
1988                        blks_to_bytes(inode, map.m_pblk) : 0;
1989                size = blks_to_bytes(inode, map.m_len);
1990                flags = 0;
1991
1992                if (compr_cluster) {
1993                        flags = FIEMAP_EXTENT_ENCODED;
1994                        count_in_cluster += map.m_len;
1995                        if (count_in_cluster == cluster_size) {
1996                                compr_cluster = false;
1997                                size += blks_to_bytes(inode, 1);
1998                        }
1999                } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
2000                        flags = FIEMAP_EXTENT_UNWRITTEN;
2001                }
2002
2003                start_blk += bytes_to_blks(inode, size);
2004        }
2005
2006prep_next:
2007        cond_resched();
2008        if (fatal_signal_pending(current))
2009                ret = -EINTR;
2010        else
2011                goto next;
2012out:
2013        if (ret == 1)
2014                ret = 0;
2015
2016        inode_unlock(inode);
2017        return ret;
2018}
2019
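/*
 * Illustrative userspace sketch (not part of the original source): how
 * f2fs_fiemap() above is typically reached through the FS_IOC_FIEMAP ioctl.
 * Error handling is omitted and the extent count of 32 is an arbitrary
 * assumption.
 *
 *      struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                 32 * sizeof(struct fiemap_extent));
 *      fm->fm_start = 0;
 *      fm->fm_length = FIEMAP_MAX_OFFSET;      // map the whole file
 *      fm->fm_extent_count = 32;
 *      ioctl(fd, FS_IOC_FIEMAP, fm);           // dispatches to f2fs_fiemap()
 *      for (i = 0; i < fm->fm_mapped_extents; i++)
 *              printf("%llu -> %llu (%llu bytes)\n",
 *                     fm->fm_extents[i].fe_logical,
 *                     fm->fm_extents[i].fe_physical,
 *                     fm->fm_extents[i].fe_length);
 */
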
2020static inline loff_t f2fs_readpage_limit(struct inode *inode)
2021{
2022        if (IS_ENABLED(CONFIG_FS_VERITY) &&
2023            (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2024                return inode->i_sb->s_maxbytes;
2025
2026        return i_size_read(inode);
2027}
2028
2029static int f2fs_read_single_page(struct inode *inode, struct page *page,
2030                                        unsigned nr_pages,
2031                                        struct f2fs_map_blocks *map,
2032                                        struct bio **bio_ret,
2033                                        sector_t *last_block_in_bio,
2034                                        bool is_readahead)
2035{
2036        struct bio *bio = *bio_ret;
2037        const unsigned blocksize = blks_to_bytes(inode, 1);
2038        sector_t block_in_file;
2039        sector_t last_block;
2040        sector_t last_block_in_file;
2041        sector_t block_nr;
2042        int ret = 0;
2043
2044        block_in_file = (sector_t)page_index(page);
2045        last_block = block_in_file + nr_pages;
2046        last_block_in_file = bytes_to_blks(inode,
2047                        f2fs_readpage_limit(inode) + blocksize - 1);
2048        if (last_block > last_block_in_file)
2049                last_block = last_block_in_file;
2050
2051        /* just zero out the page which is beyond EOF */
2052        if (block_in_file >= last_block)
2053                goto zero_out;
2054        /*
2055         * Map blocks using the previous result first.
2056         */
2057        if ((map->m_flags & F2FS_MAP_MAPPED) &&
2058                        block_in_file > map->m_lblk &&
2059                        block_in_file < (map->m_lblk + map->m_len))
2060                goto got_it;
2061
2062        /*
2063         * Then do more f2fs_map_blocks() calls until we are
2064         * done with this page.
2065         */
2066        map->m_lblk = block_in_file;
2067        map->m_len = last_block - block_in_file;
2068
2069        ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2070        if (ret)
2071                goto out;
2072got_it:
2073        if ((map->m_flags & F2FS_MAP_MAPPED)) {
2074                block_nr = map->m_pblk + block_in_file - map->m_lblk;
2075                SetPageMappedToDisk(page);
2076
2077                if (!PageUptodate(page) && (!PageSwapCache(page) &&
2078                                        !cleancache_get_page(page))) {
2079                        SetPageUptodate(page);
2080                        goto confused;
2081                }
2082
2083                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2084                                                DATA_GENERIC_ENHANCE_READ)) {
2085                        ret = -EFSCORRUPTED;
2086                        goto out;
2087                }
2088        } else {
2089zero_out:
2090                zero_user_segment(page, 0, PAGE_SIZE);
2091                if (f2fs_need_verity(inode, page->index) &&
2092                    !fsverity_verify_page(page)) {
2093                        ret = -EIO;
2094                        goto out;
2095                }
2096                if (!PageUptodate(page))
2097                        SetPageUptodate(page);
2098                unlock_page(page);
2099                goto out;
2100        }
2101
2102        /*
2103         * This page will go to BIO.  Do we need to send this
2104         * BIO off first?
2105         */
2106        if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2107                                       *last_block_in_bio, block_nr) ||
2108                    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2109submit_and_realloc:
2110                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2111                bio = NULL;
2112        }
2113        if (bio == NULL) {
2114                bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2115                                is_readahead ? REQ_RAHEAD : 0, page->index,
2116                                false);
2117                if (IS_ERR(bio)) {
2118                        ret = PTR_ERR(bio);
2119                        bio = NULL;
2120                        goto out;
2121                }
2122        }
2123
2124        /*
2125         * If the page is under writeback, we need to wait for
2126         * its completion to see the correct decrypted data.
2127         */
2128        f2fs_wait_on_block_writeback(inode, block_nr);
2129
2130        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2131                goto submit_and_realloc;
2132
2133        inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2134        f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2135        ClearPageError(page);
2136        *last_block_in_bio = block_nr;
2137        goto out;
2138confused:
2139        if (bio) {
2140                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2141                bio = NULL;
2142        }
2143        unlock_page(page);
2144out:
2145        *bio_ret = bio;
2146        return ret;
2147}
2148
2149#ifdef CONFIG_F2FS_FS_COMPRESSION
2150int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2151                                unsigned nr_pages, sector_t *last_block_in_bio,
2152                                bool is_readahead, bool for_write)
2153{
2154        struct dnode_of_data dn;
2155        struct inode *inode = cc->inode;
2156        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2157        struct bio *bio = *bio_ret;
2158        unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2159        sector_t last_block_in_file;
2160        const unsigned blocksize = blks_to_bytes(inode, 1);
2161        struct decompress_io_ctx *dic = NULL;
2162        struct extent_info ei = {0, };
2163        bool from_dnode = true;
2164        int i;
2165        int ret = 0;
2166
2167        f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2168
2169        last_block_in_file = bytes_to_blks(inode,
2170                        f2fs_readpage_limit(inode) + blocksize - 1);
2171
2172        /* get rid of pages beyond EOF */
2173        for (i = 0; i < cc->cluster_size; i++) {
2174                struct page *page = cc->rpages[i];
2175
2176                if (!page)
2177                        continue;
2178                if ((sector_t)page->index >= last_block_in_file) {
2179                        zero_user_segment(page, 0, PAGE_SIZE);
2180                        if (!PageUptodate(page))
2181                                SetPageUptodate(page);
2182                } else if (!PageUptodate(page)) {
2183                        continue;
2184                }
2185                unlock_page(page);
2186                if (for_write)
2187                        put_page(page);
2188                cc->rpages[i] = NULL;
2189                cc->nr_rpages--;
2190        }
2191
2192        /* we are done since all pages are beyond EOF */
2193        if (f2fs_cluster_is_empty(cc))
2194                goto out;
2195
2196        if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
2197                from_dnode = false;
2198
2199        if (!from_dnode)
2200                goto skip_reading_dnode;
2201
2202        set_new_dnode(&dn, inode, NULL, NULL, 0);
2203        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2204        if (ret)
2205                goto out;
2206
2207        f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2208
2209skip_reading_dnode:
2210        for (i = 1; i < cc->cluster_size; i++) {
2211                block_t blkaddr;
2212
2213                blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2214                                        dn.ofs_in_node + i) :
2215                                        ei.blk + i - 1;
2216
2217                if (!__is_valid_data_blkaddr(blkaddr))
2218                        break;
2219
2220                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2221                        ret = -EFAULT;
2222                        goto out_put_dnode;
2223                }
2224                cc->nr_cpages++;
2225
2226                if (!from_dnode && i >= ei.c_len)
2227                        break;
2228        }
2229
2230        /* nothing to decompress */
2231        if (cc->nr_cpages == 0) {
2232                ret = 0;
2233                goto out_put_dnode;
2234        }
2235
2236        dic = f2fs_alloc_dic(cc);
2237        if (IS_ERR(dic)) {
2238                ret = PTR_ERR(dic);
2239                goto out_put_dnode;
2240        }
2241
2242        for (i = 0; i < cc->nr_cpages; i++) {
2243                struct page *page = dic->cpages[i];
2244                block_t blkaddr;
2245                struct bio_post_read_ctx *ctx;
2246
2247                blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2248                                        dn.ofs_in_node + i + 1) :
2249                                        ei.blk + i;
2250
2251                f2fs_wait_on_block_writeback(inode, blkaddr);
2252
2253                if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2254                        if (atomic_dec_and_test(&dic->remaining_pages))
2255                                f2fs_decompress_cluster(dic);
2256                        continue;
2257                }
2258
2259                if (bio && (!page_is_mergeable(sbi, bio,
2260                                        *last_block_in_bio, blkaddr) ||
2261                    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2262submit_and_realloc:
2263                        __submit_bio(sbi, bio, DATA);
2264                        bio = NULL;
2265                }
2266
2267                if (!bio) {
2268                        bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2269                                        is_readahead ? REQ_RAHEAD : 0,
2270                                        page->index, for_write);
2271                        if (IS_ERR(bio)) {
2272                                ret = PTR_ERR(bio);
2273                                f2fs_decompress_end_io(dic, ret);
2274                                f2fs_put_dnode(&dn);
2275                                *bio_ret = NULL;
2276                                return ret;
2277                        }
2278                }
2279
2280                if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2281                        goto submit_and_realloc;
2282
2283                ctx = get_post_read_ctx(bio);
2284                ctx->enabled_steps |= STEP_DECOMPRESS;
2285                refcount_inc(&dic->refcnt);
2286
2287                inc_page_count(sbi, F2FS_RD_DATA);
2288                f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2289                f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2290                ClearPageError(page);
2291                *last_block_in_bio = blkaddr;
2292        }
2293
2294        if (from_dnode)
2295                f2fs_put_dnode(&dn);
2296
2297        *bio_ret = bio;
2298        return 0;
2299
2300out_put_dnode:
2301        if (from_dnode)
2302                f2fs_put_dnode(&dn);
2303out:
2304        for (i = 0; i < cc->cluster_size; i++) {
2305                if (cc->rpages[i]) {
2306                        ClearPageUptodate(cc->rpages[i]);
2307                        ClearPageError(cc->rpages[i]);
2308                        unlock_page(cc->rpages[i]);
2309                }
2310        }
2311        *bio_ret = bio;
2312        return ret;
2313}
2314#endif
2315
2316/*
2317 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2318 * The major change stems from block_size == page_size being the f2fs default.
2319 */
2320static int f2fs_mpage_readpages(struct inode *inode,
2321                struct readahead_control *rac, struct page *page)
2322{
2323        struct bio *bio = NULL;
2324        sector_t last_block_in_bio = 0;
2325        struct f2fs_map_blocks map;
2326#ifdef CONFIG_F2FS_FS_COMPRESSION
2327        struct compress_ctx cc = {
2328                .inode = inode,
2329                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2330                .cluster_size = F2FS_I(inode)->i_cluster_size,
2331                .cluster_idx = NULL_CLUSTER,
2332                .rpages = NULL,
2333                .cpages = NULL,
2334                .nr_rpages = 0,
2335                .nr_cpages = 0,
2336        };
2337        pgoff_t nc_cluster_idx = NULL_CLUSTER;
2338#endif
2339        unsigned nr_pages = rac ? readahead_count(rac) : 1;
2340        unsigned max_nr_pages = nr_pages;
2341        int ret = 0;
2342
2343        map.m_pblk = 0;
2344        map.m_lblk = 0;
2345        map.m_len = 0;
2346        map.m_flags = 0;
2347        map.m_next_pgofs = NULL;
2348        map.m_next_extent = NULL;
2349        map.m_seg_type = NO_CHECK_TYPE;
2350        map.m_may_create = false;
2351
2352        for (; nr_pages; nr_pages--) {
2353                if (rac) {
2354                        page = readahead_page(rac);
2355                        prefetchw(&page->flags);
2356                }
2357
2358#ifdef CONFIG_F2FS_FS_COMPRESSION
2359                if (f2fs_compressed_file(inode)) {
2360                        /* there are remaining compressed pages, submit them */
2361                        if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2362                                ret = f2fs_read_multi_pages(&cc, &bio,
2363                                                        max_nr_pages,
2364                                                        &last_block_in_bio,
2365                                                        rac != NULL, false);
2366                                f2fs_destroy_compress_ctx(&cc, false);
2367                                if (ret)
2368                                        goto set_error_page;
2369                        }
2370                        if (cc.cluster_idx == NULL_CLUSTER) {
2371                                if (nc_cluster_idx ==
2372                                        page->index >> cc.log_cluster_size) {
2373                                        goto read_single_page;
2374                                }
2375
2376                                ret = f2fs_is_compressed_cluster(inode, page->index);
2377                                if (ret < 0)
2378                                        goto set_error_page;
2379                                else if (!ret) {
2380                                        nc_cluster_idx =
2381                                                page->index >> cc.log_cluster_size;
2382                                        goto read_single_page;
2383                                }
2384
2385                                nc_cluster_idx = NULL_CLUSTER;
2386                        }
2387                        ret = f2fs_init_compress_ctx(&cc);
2388                        if (ret)
2389                                goto set_error_page;
2390
2391                        f2fs_compress_ctx_add_page(&cc, page);
2392
2393                        goto next_page;
2394                }
2395read_single_page:
2396#endif
2397
2398                ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2399                                        &bio, &last_block_in_bio, rac);
2400                if (ret) {
2401#ifdef CONFIG_F2FS_FS_COMPRESSION
2402set_error_page:
2403#endif
2404                        SetPageError(page);
2405                        zero_user_segment(page, 0, PAGE_SIZE);
2406                        unlock_page(page);
2407                }
2408#ifdef CONFIG_F2FS_FS_COMPRESSION
2409next_page:
2410#endif
2411                if (rac)
2412                        put_page(page);
2413
2414#ifdef CONFIG_F2FS_FS_COMPRESSION
2415                if (f2fs_compressed_file(inode)) {
2416                        /* last page */
2417                        if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2418                                ret = f2fs_read_multi_pages(&cc, &bio,
2419                                                        max_nr_pages,
2420                                                        &last_block_in_bio,
2421                                                        rac != NULL, false);
2422                                f2fs_destroy_compress_ctx(&cc, false);
2423                        }
2424                }
2425#endif
2426        }
2427        if (bio)
2428                __submit_bio(F2FS_I_SB(inode), bio, DATA);
2429        return ret;
2430}
2431
2432static int f2fs_read_data_page(struct file *file, struct page *page)
2433{
2434        struct inode *inode = page_file_mapping(page)->host;
2435        int ret = -EAGAIN;
2436
2437        trace_f2fs_readpage(page, DATA);
2438
2439        if (!f2fs_is_compress_backend_ready(inode)) {
2440                unlock_page(page);
2441                return -EOPNOTSUPP;
2442        }
2443
2444        /* If the file has inline data, try to read it directly */
2445        if (f2fs_has_inline_data(inode))
2446                ret = f2fs_read_inline_data(inode, page);
2447        if (ret == -EAGAIN)
2448                ret = f2fs_mpage_readpages(inode, NULL, page);
2449        return ret;
2450}
2451
2452static void f2fs_readahead(struct readahead_control *rac)
2453{
2454        struct inode *inode = rac->mapping->host;
2455
2456        trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2457
2458        if (!f2fs_is_compress_backend_ready(inode))
2459                return;
2460
2461        /* If the file has inline data, skip readpages */
2462        if (f2fs_has_inline_data(inode))
2463                return;
2464
2465        f2fs_mpage_readpages(inode, rac, NULL);
2466}
2467
2468int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2469{
2470        struct inode *inode = fio->page->mapping->host;
2471        struct page *mpage, *page;
2472        gfp_t gfp_flags = GFP_NOFS;
2473
2474        if (!f2fs_encrypted_file(inode))
2475                return 0;
2476
2477        page = fio->compressed_page ? fio->compressed_page : fio->page;
2478
2479        /* wait for GCed page writeback via META_MAPPING */
2480        f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2481
2482        if (fscrypt_inode_uses_inline_crypto(inode))
2483                return 0;
2484
2485retry_encrypt:
2486        fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2487                                        PAGE_SIZE, 0, gfp_flags);
2488        if (IS_ERR(fio->encrypted_page)) {
2489                /* flush pending IOs and wait for a while in the ENOMEM case */
2490                if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2491                        f2fs_flush_merged_writes(fio->sbi);
2492                        congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2493                        gfp_flags |= __GFP_NOFAIL;
2494                        goto retry_encrypt;
2495                }
2496                return PTR_ERR(fio->encrypted_page);
2497        }
2498
2499        mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2500        if (mpage) {
2501                if (PageUptodate(mpage))
2502                        memcpy(page_address(mpage),
2503                                page_address(fio->encrypted_page), PAGE_SIZE);
2504                f2fs_put_page(mpage, 1);
2505        }
2506        return 0;
2507}
2508
2509static inline bool check_inplace_update_policy(struct inode *inode,
2510                                struct f2fs_io_info *fio)
2511{
2512        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2513        unsigned int policy = SM_I(sbi)->ipu_policy;
2514
2515        if (policy & (0x1 << F2FS_IPU_FORCE))
2516                return true;
2517        if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2518                return true;
2519        if (policy & (0x1 << F2FS_IPU_UTIL) &&
2520                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
2521                return true;
2522        if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2523                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
2524                return true;
2525
2526        /*
2527         * allow IPU for rewriting async pages
2528         */
2529        if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2530                        fio && fio->op == REQ_OP_WRITE &&
2531                        !(fio->op_flags & REQ_SYNC) &&
2532                        !IS_ENCRYPTED(inode))
2533                return true;
2534
2535        /* this is only set during fdatasync */
2536        if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2537                        is_inode_flag_set(inode, FI_NEED_IPU))
2538                return true;
2539
2540        if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2541                        !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2542                return true;
2543
2544        return false;
2545}
2546
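/*
 * Illustrative note (an assumption based on the F2FS_IPU_* enum in segment.h
 * and the ipu_policy sysfs knob): each policy above is a single bit, so e.g.
 *
 *      echo 4 > /sys/fs/f2fs/<dev>/ipu_policy
 *
 * sets 1 << F2FS_IPU_UTIL and allows in-place updates once utilization
 * exceeds min_ipu_util.
 */
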
2547bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2548{
2549        /* swap file is migrating in aligned write mode */
2550        if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2551                return false;
2552
2553        if (f2fs_is_pinned_file(inode))
2554                return true;
2555
2556        /* if this is a cold file, we should overwrite it to avoid fragmentation */
2557        if (file_is_cold(inode))
2558                return true;
2559
2560        return check_inplace_update_policy(inode, fio);
2561}
2562
2563bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2564{
2565        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2566
2567        if (f2fs_lfs_mode(sbi))
2568                return true;
2569        if (S_ISDIR(inode->i_mode))
2570                return true;
2571        if (IS_NOQUOTA(inode))
2572                return true;
2573        if (f2fs_is_atomic_file(inode))
2574                return true;
2575        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2576                return true;
2577
2578        /* swap file is migrating in aligned write mode */
2579        if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2580                return true;
2581
2582        if (fio) {
2583                if (page_private_gcing(fio->page))
2584                        return true;
2585                if (page_private_dummy(fio->page))
2586                        return true;
2587                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2588                        f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2589                        return true;
2590        }
2591        return false;
2592}
2593
2594static inline bool need_inplace_update(struct f2fs_io_info *fio)
2595{
2596        struct inode *inode = fio->page->mapping->host;
2597
2598        if (f2fs_should_update_outplace(inode, fio))
2599                return false;
2600
2601        return f2fs_should_update_inplace(inode, fio);
2602}
2603
2604int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2605{
2606        struct page *page = fio->page;
2607        struct inode *inode = page->mapping->host;
2608        struct dnode_of_data dn;
2609        struct extent_info ei = {0, };
2610        struct node_info ni;
2611        bool ipu_force = false;
2612        int err = 0;
2613
2614        set_new_dnode(&dn, inode, NULL, NULL, 0);
2615        if (need_inplace_update(fio) &&
2616                        f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2617                fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2618
2619                if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2620                                                DATA_GENERIC_ENHANCE))
2621                        return -EFSCORRUPTED;
2622
2623                ipu_force = true;
2624                fio->need_lock = LOCK_DONE;
2625                goto got_it;
2626        }
2627
2628        /* Avoid a deadlock between page->lock and f2fs_lock_op */
2629        if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2630                return -EAGAIN;
2631
2632        err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2633        if (err)
2634                goto out;
2635
2636        fio->old_blkaddr = dn.data_blkaddr;
2637
2638        /* This page is already truncated */
2639        if (fio->old_blkaddr == NULL_ADDR) {
2640                ClearPageUptodate(page);
2641                clear_page_private_gcing(page);
2642                goto out_writepage;
2643        }
2644got_it:
2645        if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2646                !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2647                                                DATA_GENERIC_ENHANCE)) {
2648                err = -EFSCORRUPTED;
2649                goto out_writepage;
2650        }
2651        /*
2652         * If the current allocation needs SSR,
2653         * in-place writes are preferable for updated data.
2654         */
2655        if (ipu_force ||
2656                (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2657                                        need_inplace_update(fio))) {
2658                err = f2fs_encrypt_one_page(fio);
2659                if (err)
2660                        goto out_writepage;
2661
2662                set_page_writeback(page);
2663                ClearPageError(page);
2664                f2fs_put_dnode(&dn);
2665                if (fio->need_lock == LOCK_REQ)
2666                        f2fs_unlock_op(fio->sbi);
2667                err = f2fs_inplace_write_data(fio);
2668                if (err) {
2669                        if (fscrypt_inode_uses_fs_layer_crypto(inode))
2670                                fscrypt_finalize_bounce_page(&fio->encrypted_page);
2671                        if (PageWriteback(page))
2672                                end_page_writeback(page);
2673                } else {
2674                        set_inode_flag(inode, FI_UPDATE_WRITE);
2675                }
2676                trace_f2fs_do_write_data_page(fio->page, IPU);
2677                return err;
2678        }
2679
2680        if (fio->need_lock == LOCK_RETRY) {
2681                if (!f2fs_trylock_op(fio->sbi)) {
2682                        err = -EAGAIN;
2683                        goto out_writepage;
2684                }
2685                fio->need_lock = LOCK_REQ;
2686        }
2687
2688        err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2689        if (err)
2690                goto out_writepage;
2691
2692        fio->version = ni.version;
2693
2694        err = f2fs_encrypt_one_page(fio);
2695        if (err)
2696                goto out_writepage;
2697
2698        set_page_writeback(page);
2699        ClearPageError(page);
2700
2701        if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2702                f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2703
2704        /* LFS mode write path */
2705        f2fs_outplace_write_data(&dn, fio);
2706        trace_f2fs_do_write_data_page(page, OPU);
2707        set_inode_flag(inode, FI_APPEND_WRITE);
2708        if (page->index == 0)
2709                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2710out_writepage:
2711        f2fs_put_dnode(&dn);
2712out:
2713        if (fio->need_lock == LOCK_REQ)
2714                f2fs_unlock_op(fio->sbi);
2715        return err;
2716}
2717
2718int f2fs_write_single_data_page(struct page *page, int *submitted,
2719                                struct bio **bio,
2720                                sector_t *last_block,
2721                                struct writeback_control *wbc,
2722                                enum iostat_type io_type,
2723                                int compr_blocks,
2724                                bool allow_balance)
2725{
2726        struct inode *inode = page->mapping->host;
2727        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2728        loff_t i_size = i_size_read(inode);
2729        const pgoff_t end_index = ((unsigned long long)i_size)
2730                                                        >> PAGE_SHIFT;
2731        loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2732        unsigned offset = 0;
2733        bool need_balance_fs = false;
2734        int err = 0;
2735        struct f2fs_io_info fio = {
2736                .sbi = sbi,
2737                .ino = inode->i_ino,
2738                .type = DATA,
2739                .op = REQ_OP_WRITE,
2740                .op_flags = wbc_to_write_flags(wbc),
2741                .old_blkaddr = NULL_ADDR,
2742                .page = page,
2743                .encrypted_page = NULL,
2744                .submitted = false,
2745                .compr_blocks = compr_blocks,
2746                .need_lock = LOCK_RETRY,
2747                .io_type = io_type,
2748                .io_wbc = wbc,
2749                .bio = bio,
2750                .last_block = last_block,
2751        };
2752
2753        trace_f2fs_writepage(page, DATA);
2754
2755        /* we should bypass data pages to let the kworker jobs proceed */
2756        if (unlikely(f2fs_cp_error(sbi))) {
2757                mapping_set_error(page->mapping, -EIO);
2758                /*
2759                 * don't drop any dirty dentry pages to keep the latest
2760                 * directory structure.
2761                 */
2762                if (S_ISDIR(inode->i_mode))
2763                        goto redirty_out;
2764                goto out;
2765        }
2766
2767        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2768                goto redirty_out;
2769
2770        if (page->index < end_index ||
2771                        f2fs_verity_in_progress(inode) ||
2772                        compr_blocks)
2773                goto write;
2774
2775        /*
2776         * If the offset is beyond the end of the file,
2777         * this page does not have to be written to disk.
2778         */
2779        offset = i_size & (PAGE_SIZE - 1);
2780        if ((page->index >= end_index + 1) || !offset)
2781                goto out;
2782
2783        zero_user_segment(page, offset, PAGE_SIZE);
2784write:
2785        if (f2fs_is_drop_cache(inode))
2786                goto out;
2787        /* we should not write the 0'th page, which holds the journal header */
2788        if (f2fs_is_volatile_file(inode) && (!page->index ||
2789                        (!wbc->for_reclaim &&
2790                        f2fs_available_free_memory(sbi, BASE_CHECK))))
2791                goto redirty_out;
2792
2793        /* Dentry/quota blocks are controlled by checkpoint */
2794        if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2795                /*
2796                 * We need to wait for node_write to avoid block allocation during
2797                 * checkpoint. This can only happen to quota writes, which can cause
2798                 * the discard race condition below.
2799                 */
2800                if (IS_NOQUOTA(inode))
2801                        down_read(&sbi->node_write);
2802
2803                fio.need_lock = LOCK_DONE;
2804                err = f2fs_do_write_data_page(&fio);
2805
2806                if (IS_NOQUOTA(inode))
2807                        up_read(&sbi->node_write);
2808
2809                goto done;
2810        }
2811
2812        if (!wbc->for_reclaim)
2813                need_balance_fs = true;
2814        else if (has_not_enough_free_secs(sbi, 0, 0))
2815                goto redirty_out;
2816        else
2817                set_inode_flag(inode, FI_HOT_DATA);
2818
2819        err = -EAGAIN;
2820        if (f2fs_has_inline_data(inode)) {
2821                err = f2fs_write_inline_data(inode, page);
2822                if (!err)
2823                        goto out;
2824        }
2825
2826        if (err == -EAGAIN) {
2827                err = f2fs_do_write_data_page(&fio);
2828                if (err == -EAGAIN) {
2829                        fio.need_lock = LOCK_REQ;
2830                        err = f2fs_do_write_data_page(&fio);
2831                }
2832        }
2833
2834        if (err) {
2835                file_set_keep_isize(inode);
2836        } else {
2837                spin_lock(&F2FS_I(inode)->i_size_lock);
2838                if (F2FS_I(inode)->last_disk_size < psize)
2839                        F2FS_I(inode)->last_disk_size = psize;
2840                spin_unlock(&F2FS_I(inode)->i_size_lock);
2841        }
2842
2843done:
2844        if (err && err != -ENOENT)
2845                goto redirty_out;
2846
2847out:
2848        inode_dec_dirty_pages(inode);
2849        if (err) {
2850                ClearPageUptodate(page);
2851                clear_page_private_gcing(page);
2852        }
2853
2854        if (wbc->for_reclaim) {
2855                f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2856                clear_inode_flag(inode, FI_HOT_DATA);
2857                f2fs_remove_dirty_inode(inode);
2858                submitted = NULL;
2859        }
2860        unlock_page(page);
2861        if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2862                        !F2FS_I(inode)->cp_task && allow_balance)
2863                f2fs_balance_fs(sbi, need_balance_fs);
2864
2865        if (unlikely(f2fs_cp_error(sbi))) {
2866                f2fs_submit_merged_write(sbi, DATA);
2867                f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2868                submitted = NULL;
2869        }
2870
2871        if (submitted)
2872                *submitted = fio.submitted ? 1 : 0;
2873
2874        return 0;
2875
2876redirty_out:
2877        redirty_page_for_writepage(wbc, page);
2878        /*
2879         * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2880         * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2881         * file_write_and_wait_range() will then see the EIO error, which is
2882         * critical for returning the atomic_write failure to the user via fsync().
2883         */
2884        if (!err || wbc->for_reclaim)
2885                return AOP_WRITEPAGE_ACTIVATE;
2886        unlock_page(page);
2887        return err;
2888}
2889
2890static int f2fs_write_data_page(struct page *page,
2891                                        struct writeback_control *wbc)
2892{
2893#ifdef CONFIG_F2FS_FS_COMPRESSION
2894        struct inode *inode = page->mapping->host;
2895
2896        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2897                goto out;
2898
2899        if (f2fs_compressed_file(inode)) {
2900                if (f2fs_is_compressed_cluster(inode, page->index)) {
2901                        redirty_page_for_writepage(wbc, page);
2902                        return AOP_WRITEPAGE_ACTIVATE;
2903                }
2904        }
2905out:
2906#endif
2907
2908        return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2909                                                wbc, FS_DATA_IO, 0, true);
2910}
2911
2912/*
2913 * This function was copied from write_cache_pages() in mm/page-writeback.c.
2914 * The major change is performing the write step for cold data pages
2915 * separately from warm/hot data pages.
2916 */
2917static int f2fs_write_cache_pages(struct address_space *mapping,
2918                                        struct writeback_control *wbc,
2919                                        enum iostat_type io_type)
2920{
2921        int ret = 0;
2922        int done = 0, retry = 0;
2923        struct pagevec pvec;
2924        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2925        struct bio *bio = NULL;
2926        sector_t last_block;
2927#ifdef CONFIG_F2FS_FS_COMPRESSION
2928        struct inode *inode = mapping->host;
2929        struct compress_ctx cc = {
2930                .inode = inode,
2931                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2932                .cluster_size = F2FS_I(inode)->i_cluster_size,
2933                .cluster_idx = NULL_CLUSTER,
2934                .rpages = NULL,
2935                .nr_rpages = 0,
2936                .cpages = NULL,
2937                .rbuf = NULL,
2938                .cbuf = NULL,
2939                .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2940                .private = NULL,
2941        };
2942#endif
2943        int nr_pages;
2944        pgoff_t index;
2945        pgoff_t end;            /* Inclusive */
2946        pgoff_t done_index;
2947        int range_whole = 0;
2948        xa_mark_t tag;
2949        int nwritten = 0;
2950        int submitted = 0;
2951        int i;
2952
2953        pagevec_init(&pvec);
2954
2955        if (get_dirty_pages(mapping->host) <=
2956                                SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2957                set_inode_flag(mapping->host, FI_HOT_DATA);
2958        else
2959                clear_inode_flag(mapping->host, FI_HOT_DATA);
2960
2961        if (wbc->range_cyclic) {
2962                index = mapping->writeback_index; /* prev offset */
2963                end = -1;
2964        } else {
2965                index = wbc->range_start >> PAGE_SHIFT;
2966                end = wbc->range_end >> PAGE_SHIFT;
2967                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2968                        range_whole = 1;
2969        }
2970        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2971                tag = PAGECACHE_TAG_TOWRITE;
2972        else
2973                tag = PAGECACHE_TAG_DIRTY;
2974retry:
2975        retry = 0;
2976        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2977                tag_pages_for_writeback(mapping, index, end);
2978        done_index = index;
2979        while (!done && !retry && (index <= end)) {
2980                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2981                                tag);
2982                if (nr_pages == 0)
2983                        break;
2984
2985                for (i = 0; i < nr_pages; i++) {
2986                        struct page *page = pvec.pages[i];
2987                        bool need_readd;
2988readd:
2989                        need_readd = false;
2990#ifdef CONFIG_F2FS_FS_COMPRESSION
2991                        if (f2fs_compressed_file(inode)) {
2992                                ret = f2fs_init_compress_ctx(&cc);
2993                                if (ret) {
2994                                        done = 1;
2995                                        break;
2996                                }
2997
2998                                if (!f2fs_cluster_can_merge_page(&cc,
2999                                                                page->index)) {
3000                                        ret = f2fs_write_multi_pages(&cc,
3001                                                &submitted, wbc, io_type);
3002                                        if (!ret)
3003                                                need_readd = true;
3004                                        goto result;
3005                                }
3006
3007                                if (unlikely(f2fs_cp_error(sbi)))
3008                                        goto lock_page;
3009
3010                                if (f2fs_cluster_is_empty(&cc)) {
3011                                        void *fsdata = NULL;
3012                                        struct page *pagep;
3013                                        int ret2;
3014
3015                                        ret2 = f2fs_prepare_compress_overwrite(
3016                                                        inode, &pagep,
3017                                                        page->index, &fsdata);
3018                                        if (ret2 < 0) {
3019                                                ret = ret2;
3020                                                done = 1;
3021                                                break;
3022                                        } else if (ret2 &&
3023                                                !f2fs_compress_write_end(inode,
3024                                                                fsdata, page->index,
3025                                                                1)) {
3026                                                retry = 1;
3027                                                break;
3028                                        }
3029                                } else {
3030                                        goto lock_page;
3031                                }
3032                        }
3033#endif
3034                        /* give priority to WB_SYNC threads */
3035                        if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3036                                        wbc->sync_mode == WB_SYNC_NONE) {
3037                                done = 1;
3038                                break;
3039                        }
3040#ifdef CONFIG_F2FS_FS_COMPRESSION
3041lock_page:
3042#endif
3043                        done_index = page->index;
3044retry_write:
3045                        lock_page(page);
3046
3047                        if (unlikely(page->mapping != mapping)) {
3048continue_unlock:
3049                                unlock_page(page);
3050                                continue;
3051                        }
3052
3053                        if (!PageDirty(page)) {
3054                                /* someone wrote it for us */
3055                                goto continue_unlock;
3056                        }
3057
3058                        if (PageWriteback(page)) {
3059                                if (wbc->sync_mode != WB_SYNC_NONE)
3060                                        f2fs_wait_on_page_writeback(page,
3061                                                        DATA, true, true);
3062                                else
3063                                        goto continue_unlock;
3064                        }
3065
3066                        if (!clear_page_dirty_for_io(page))
3067                                goto continue_unlock;
3068
3069#ifdef CONFIG_F2FS_FS_COMPRESSION
3070                        if (f2fs_compressed_file(inode)) {
3071                                get_page(page);
3072                                f2fs_compress_ctx_add_page(&cc, page);
3073                                continue;
3074                        }
3075#endif
3076                        ret = f2fs_write_single_data_page(page, &submitted,
3077                                        &bio, &last_block, wbc, io_type,
3078                                        0, true);
3079                        if (ret == AOP_WRITEPAGE_ACTIVATE)
3080                                unlock_page(page);
3081#ifdef CONFIG_F2FS_FS_COMPRESSION
3082result:
3083#endif
3084                        nwritten += submitted;
3085                        wbc->nr_to_write -= submitted;
3086
3087                        if (unlikely(ret)) {
3088                                /*
3089                                 * keep nr_to_write, since vfs uses this to
3090                                 * get # of written pages.
3091                                 */
3092                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
3093                                        ret = 0;
3094                                        goto next;
3095                                } else if (ret == -EAGAIN) {
3096                                        ret = 0;
3097                                        if (wbc->sync_mode == WB_SYNC_ALL) {
3098                                                cond_resched();
3099                                                congestion_wait(BLK_RW_ASYNC,
3100                                                        DEFAULT_IO_TIMEOUT);
3101                                                goto retry_write;
3102                                        }
3103                                        goto next;
3104                                }
3105                                done_index = page->index + 1;
3106                                done = 1;
3107                                break;
3108                        }
3109
3110                        if (wbc->nr_to_write <= 0 &&
3111                                        wbc->sync_mode == WB_SYNC_NONE) {
3112                                done = 1;
3113                                break;
3114                        }
3115next:
3116                        if (need_readd)
3117                                goto readd;
3118                }
3119                pagevec_release(&pvec);
3120                cond_resched();
3121        }
3122#ifdef CONFIG_F2FS_FS_COMPRESSION
3123        /* flush remaining pages in the compress cluster */
3124        if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3125                ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3126                nwritten += submitted;
3127                wbc->nr_to_write -= submitted;
3128                if (ret) {
3129                        done = 1;
3130                        retry = 0;
3131                }
3132        }
3133        if (f2fs_compressed_file(inode))
3134                f2fs_destroy_compress_ctx(&cc, false);
3135#endif
3136        if (retry) {
3137                index = 0;
3138                end = -1;
3139                goto retry;
3140        }
3141        if (wbc->range_cyclic && !done)
3142                done_index = 0;
3143        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3144                mapping->writeback_index = done_index;
3145
3146        if (nwritten)
3147                f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3148                                                                NULL, 0, DATA);
3149        /* submit cached bio of IPU write */
3150        if (bio)
3151                f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3152
3153        return ret;
3154}
3155
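    /*
     * Decide whether writeback for this inode must be serialized under
     * sbi->writepages.  The checkpoint task, non-regular files and quota
     * files are excluded; compressed files, WB_SYNC_NONE writeback and
     * inodes with many dirty pages are serialized so that their blocks
     * tend to be written out sequentially.
     */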
3156static inline bool __should_serialize_io(struct inode *inode,
3157                                        struct writeback_control *wbc)
3158{
3159        /* to avoid deadlock in the data flush path */
3160        if (F2FS_I(inode)->cp_task)
3161                return false;
3162
3163        if (!S_ISREG(inode->i_mode))
3164                return false;
3165        if (IS_NOQUOTA(inode))
3166                return false;
3167
3168        if (f2fs_need_compress_data(inode))
3169                return true;
3170        if (wbc->sync_mode != WB_SYNC_ALL)
3171                return true;
3172        if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3173                return true;
3174        return false;
3175}
3176
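    /*
     * Common .writepages implementation: decide whether writeback can be
     * skipped (e.g. during POR or defragmentation), serialize with other
     * writers when needed, and walk the dirty pages through
     * f2fs_write_cache_pages() under a block plug.
     */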
3177static int __f2fs_write_data_pages(struct address_space *mapping,
3178                                                struct writeback_control *wbc,
3179                                                enum iostat_type io_type)
3180{
3181        struct inode *inode = mapping->host;
3182        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3183        struct blk_plug plug;
3184        int ret;
3185        bool locked = false;
3186
3187        /* deal with chardevs and other special files */
3188        if (!mapping->a_ops->writepage)
3189                return 0;
3190
3191        /* skip writing if there is no dirty page in this inode */
3192        if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3193                return 0;
3194
3195        /* during POR, we don't need to trigger writepage at all. */
3196        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3197                goto skip_write;
3198
3199        if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3200                        wbc->sync_mode == WB_SYNC_NONE &&
3201                        get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3202                        f2fs_available_free_memory(sbi, DIRTY_DENTS))
3203                goto skip_write;
3204
3205        /* skip writing during file defragment */
3206        if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3207                goto skip_write;
3208
3209        trace_f2fs_writepages(mapping->host, wbc, DATA);
3210
3211        /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3212        if (wbc->sync_mode == WB_SYNC_ALL)
3213                atomic_inc(&sbi->wb_sync_req[DATA]);
3214        else if (atomic_read(&sbi->wb_sync_req[DATA]))
3215                goto skip_write;
3216
3217        if (__should_serialize_io(inode, wbc)) {
3218                mutex_lock(&sbi->writepages);
3219                locked = true;
3220        }
3221
3222        blk_start_plug(&plug);
3223        ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3224        blk_finish_plug(&plug);
3225
3226        if (locked)
3227                mutex_unlock(&sbi->writepages);
3228
3229        if (wbc->sync_mode == WB_SYNC_ALL)
3230                atomic_dec(&sbi->wb_sync_req[DATA]);
3231        /*
3232         * if some pages were truncated, we cannot rely on mapping->host to
3233         * detect pending bios, so remove the inode from the dirty list here.
3234         */
3235
3236        f2fs_remove_dirty_inode(inode);
3237        return ret;
3238
3239skip_write:
3240        wbc->pages_skipped += get_dirty_pages(inode);
3241        trace_f2fs_writepages(mapping->host, wbc, DATA);
3242        return 0;
3243}
3244
3245static int f2fs_write_data_pages(struct address_space *mapping,
3246                            struct writeback_control *wbc)
3247{
3248        struct inode *inode = mapping->host;
3249
3250        return __f2fs_write_data_pages(mapping, wbc,
3251                        F2FS_I(inode)->cp_task == current ?
3252                        FS_CP_DATA_IO : FS_DATA_IO);
3253}
3254
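    /*
     * A failed write may have instantiated blocks beyond i_size: truncate
     * the page cache and those blocks, under i_gc_rwsem[WRITE] and the
     * invalidate lock.
     */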
3255static void f2fs_write_failed(struct inode *inode, loff_t to)
3256{
3257        loff_t i_size = i_size_read(inode);
3258
3259        if (IS_NOQUOTA(inode))
3260                return;
3261
3262        /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3263        if (to > i_size && !f2fs_verity_in_progress(inode)) {
3264                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3265                filemap_invalidate_lock(inode->i_mapping);
3266
3267                truncate_pagecache(inode, i_size);
3268                f2fs_truncate_blocks(inode, i_size, true);
3269
3270                filemap_invalidate_unlock(inode->i_mapping);
3271                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3272        }
3273}
3274
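    /*
     * Look up (or allocate) the block address backing @page for an upcoming
     * write.  Inline data is read in place or converted as needed, and
     * *node_changed reports whether a node page was dirtied so that the
     * caller can rebalance the filesystem.
     */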
3275static int prepare_write_begin(struct f2fs_sb_info *sbi,
3276                        struct page *page, loff_t pos, unsigned len,
3277                        block_t *blk_addr, bool *node_changed)
3278{
3279        struct inode *inode = page->mapping->host;
3280        pgoff_t index = page->index;
3281        struct dnode_of_data dn;
3282        struct page *ipage;
3283        bool locked = false;
3284        struct extent_info ei = {0, };
3285        int err = 0;
3286        int flag;
3287
3288        /*
3289         * If all the blocks were already allocated and the page does not
3290         * need to be filled with old data, skip the block address lookup.
3291         */
3292        if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3293            !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3294            !f2fs_verity_in_progress(inode))
3295                return 0;
3296
3297        /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3298        if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3299                flag = F2FS_GET_BLOCK_DEFAULT;
3300        else
3301                flag = F2FS_GET_BLOCK_PRE_AIO;
3302
3303        if (f2fs_has_inline_data(inode) ||
3304                        (pos & PAGE_MASK) >= i_size_read(inode)) {
3305                f2fs_do_map_lock(sbi, flag, true);
3306                locked = true;
3307        }
3308
3309restart:
3310        /* check inline_data */
3311        ipage = f2fs_get_node_page(sbi, inode->i_ino);
3312        if (IS_ERR(ipage)) {
3313                err = PTR_ERR(ipage);
3314                goto unlock_out;
3315        }
3316
3317        set_new_dnode(&dn, inode, ipage, ipage, 0);
3318
3319        if (f2fs_has_inline_data(inode)) {
3320                if (pos + len <= MAX_INLINE_DATA(inode)) {
3321                        f2fs_do_read_inline_data(page, ipage);
3322                        set_inode_flag(inode, FI_DATA_EXIST);
3323                        if (inode->i_nlink)
3324                                set_page_private_inline(ipage);
3325                } else {
3326                        err = f2fs_convert_inline_page(&dn, page);
3327                        if (err)
3328                                goto out;
3329                        if (dn.data_blkaddr == NULL_ADDR)
3330                                err = f2fs_get_block(&dn, index);
3331                }
3332        } else if (locked) {
3333                err = f2fs_get_block(&dn, index);
3334        } else {
3335                if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3336                        dn.data_blkaddr = ei.blk + index - ei.fofs;
3337                } else {
3338                        /* hole case */
3339                        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3340                        if (err || dn.data_blkaddr == NULL_ADDR) {
3341                                f2fs_put_dnode(&dn);
3342                                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3343                                                                true);
3344                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3345                                locked = true;
3346                                goto restart;
3347                        }
3348                }
3349        }
3350
3351        /* convert_inline_page can make node_changed */
3352        *blk_addr = dn.data_blkaddr;
3353        *node_changed = dn.node_changed;
3354out:
3355        f2fs_put_dnode(&dn);
3356unlock_out:
3357        if (locked)
3358                f2fs_do_map_lock(sbi, flag, false);
3359        return err;
3360}
3361
3362static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3363                loff_t pos, unsigned len, unsigned flags,
3364                struct page **pagep, void **fsdata)
3365{
3366        struct inode *inode = mapping->host;
3367        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3368        struct page *page = NULL;
3369        pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3370        bool need_balance = false, drop_atomic = false;
3371        block_t blkaddr = NULL_ADDR;
3372        int err = 0;
3373
3374        trace_f2fs_write_begin(inode, pos, len, flags);
3375
3376        if (!f2fs_is_checkpoint_ready(sbi)) {
3377                err = -ENOSPC;
3378                goto fail;
3379        }
3380
3381        if ((f2fs_is_atomic_file(inode) &&
3382                        !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3383                        is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3384                err = -ENOMEM;
3385                drop_atomic = true;
3386                goto fail;
3387        }
3388
3389        /*
3390         * We should check this here to avoid deadlock between the inode page
3391         * and page #0. The locking rule for inline_data conversion should be:
3392         * lock_page(page #0) -> lock_page(inode_page)
3393         */
3394        if (index != 0) {
3395                err = f2fs_convert_inline_inode(inode);
3396                if (err)
3397                        goto fail;
3398        }
3399
3400#ifdef CONFIG_F2FS_FS_COMPRESSION
3401        if (f2fs_compressed_file(inode)) {
3402                int ret;
3403
3404                *fsdata = NULL;
3405
3406                if (len == PAGE_SIZE)
3407                        goto repeat;
3408
3409                ret = f2fs_prepare_compress_overwrite(inode, pagep,
3410                                                        index, fsdata);
3411                if (ret < 0) {
3412                        err = ret;
3413                        goto fail;
3414                } else if (ret) {
3415                        return 0;
3416                }
3417        }
3418#endif
3419
3420repeat:
3421        /*
3422         * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3423         * wait_for_stable_page. We wait on it below under our own IO control.
3424         */
3425        page = f2fs_pagecache_get_page(mapping, index,
3426                                FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3427        if (!page) {
3428                err = -ENOMEM;
3429                goto fail;
3430        }
3431
3432        /* TODO: cluster can be compressed due to race with .writepage */
3433
3434        *pagep = page;
3435
3436        err = prepare_write_begin(sbi, page, pos, len,
3437                                        &blkaddr, &need_balance);
3438        if (err)
3439                goto fail;
3440
3441        if (need_balance && !IS_NOQUOTA(inode) &&
3442                        has_not_enough_free_secs(sbi, 0, 0)) {
3443                unlock_page(page);
3444                f2fs_balance_fs(sbi, true);
3445                lock_page(page);
3446                if (page->mapping != mapping) {
3447                        /* The page got truncated from under us */
3448                        f2fs_put_page(page, 1);
3449                        goto repeat;
3450                }
3451        }
3452
3453        f2fs_wait_on_page_writeback(page, DATA, false, true);
3454
3455        if (len == PAGE_SIZE || PageUptodate(page))
3456                return 0;
3457
3458        if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3459            !f2fs_verity_in_progress(inode)) {
3460                zero_user_segment(page, len, PAGE_SIZE);
3461                return 0;
3462        }
3463
3464        if (blkaddr == NEW_ADDR) {
3465                zero_user_segment(page, 0, PAGE_SIZE);
3466                SetPageUptodate(page);
3467        } else {
3468                if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3469                                DATA_GENERIC_ENHANCE_READ)) {
3470                        err = -EFSCORRUPTED;
3471                        goto fail;
3472                }
3473                err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3474                if (err)
3475                        goto fail;
3476
3477                lock_page(page);
3478                if (unlikely(page->mapping != mapping)) {
3479                        f2fs_put_page(page, 1);
3480                        goto repeat;
3481                }
3482                if (unlikely(!PageUptodate(page))) {
3483                        err = -EIO;
3484                        goto fail;
3485                }
3486        }
3487        return 0;
3488
3489fail:
3490        f2fs_put_page(page, 1);
3491        f2fs_write_failed(inode, pos + len);
3492        if (drop_atomic)
3493                f2fs_drop_inmem_pages_all(sbi, false);
3494        return err;
3495}
3496
3497static int f2fs_write_end(struct file *file,
3498                        struct address_space *mapping,
3499                        loff_t pos, unsigned len, unsigned copied,
3500                        struct page *page, void *fsdata)
3501{
3502        struct inode *inode = page->mapping->host;
3503
3504        trace_f2fs_write_end(inode, pos, len, copied);
3505
3506        /*
3507         * This should come from len == PAGE_SIZE, so we expect copied to be
3508         * PAGE_SIZE as well. Otherwise, we treat it as zero copied and let
3509         * generic_perform_write() try to copy the data again via copied=0.
3510         */
3511        if (!PageUptodate(page)) {
3512                if (unlikely(copied != len))
3513                        copied = 0;
3514                else
3515                        SetPageUptodate(page);
3516        }
3517
3518#ifdef CONFIG_F2FS_FS_COMPRESSION
3519        /* overwrite compressed file */
3520        if (f2fs_compressed_file(inode) && fsdata) {
3521                f2fs_compress_write_end(inode, fsdata, page->index, copied);
3522                f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3523
3524                if (pos + copied > i_size_read(inode) &&
3525                                !f2fs_verity_in_progress(inode))
3526                        f2fs_i_size_write(inode, pos + copied);
3527                return copied;
3528        }
3529#endif
3530
3531        if (!copied)
3532                goto unlock_out;
3533
3534        set_page_dirty(page);
3535
3536        if (pos + copied > i_size_read(inode) &&
3537            !f2fs_verity_in_progress(inode))
3538                f2fs_i_size_write(inode, pos + copied);
3539unlock_out:
3540        f2fs_put_page(page, 1);
3541        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3542        return copied;
3543}
3544
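    /*
     * Check the alignment of a direct I/O request.  Returns 0 if DIO can
     * proceed, 1 to fall back to buffered I/O, or -EINVAL if the request
     * is misaligned even for the device's logical block size.
     */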
3545static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3546                           loff_t offset)
3547{
3548        unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3549        unsigned blkbits = i_blkbits;
3550        unsigned blocksize_mask = (1 << blkbits) - 1;
3551        unsigned long align = offset | iov_iter_alignment(iter);
3552        struct block_device *bdev = inode->i_sb->s_bdev;
3553
3554        if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3555                return 1;
3556
3557        if (align & blocksize_mask) {
3558                if (bdev)
3559                        blkbits = blksize_bits(bdev_logical_block_size(bdev));
3560                blocksize_mask = (1 << blkbits) - 1;
3561                if (align & blocksize_mask)
3562                        return -EINVAL;
3563                return 1;
3564        }
3565        return 0;
3566}
3567
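    /*
     * Completion callback for direct I/O bios: drop the in-flight DIO page
     * count, restore the original bi_private/bi_end_io, and complete the
     * bio.
     */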
3568static void f2fs_dio_end_io(struct bio *bio)
3569{
3570        struct f2fs_private_dio *dio = bio->bi_private;
3571
3572        dec_page_count(F2FS_I_SB(dio->inode),
3573                        dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3574
3575        bio->bi_private = dio->orig_private;
3576        bio->bi_end_io = dio->orig_end_io;
3577
3578        kfree(dio);
3579
3580        bio_endio(bio);
3581}
3582
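    /*
     * Submission hook passed to __blockdev_direct_IO(): wrap the bio's
     * private data so that in-flight DIO reads/writes can be counted per
     * superblock.  If the wrapper cannot be allocated, fail the bio with
     * BLK_STS_IOERR.
     */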
3583static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3584                                                        loff_t file_offset)
3585{
3586        struct f2fs_private_dio *dio;
3587        bool write = (bio_op(bio) == REQ_OP_WRITE);
3588
3589        dio = f2fs_kzalloc(F2FS_I_SB(inode),
3590                        sizeof(struct f2fs_private_dio), GFP_NOFS);
3591        if (!dio)
3592                goto out;
3593
3594        dio->inode = inode;
3595        dio->orig_end_io = bio->bi_end_io;
3596        dio->orig_private = bio->bi_private;
3597        dio->write = write;
3598
3599        bio->bi_end_io = f2fs_dio_end_io;
3600        bio->bi_private = dio;
3601
3602        inc_page_count(F2FS_I_SB(inode),
3603                        write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3604
3605        submit_bio(bio);
3606        return;
3607out:
3608        bio->bi_status = BLK_STS_IOERR;
3609        bio_endio(bio);
3610}
3611
3612static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3613{
3614        struct address_space *mapping = iocb->ki_filp->f_mapping;
3615        struct inode *inode = mapping->host;
3616        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3617        struct f2fs_inode_info *fi = F2FS_I(inode);
3618        size_t count = iov_iter_count(iter);
3619        loff_t offset = iocb->ki_pos;
3620        int rw = iov_iter_rw(iter);
3621        int err;
3622        enum rw_hint hint = iocb->ki_hint;
3623        int whint_mode = F2FS_OPTION(sbi).whint_mode;
3624        bool do_opu;
3625
3626        err = check_direct_IO(inode, iter, offset);
3627        if (err)
3628                return err < 0 ? err : 0;
3629
3630        if (f2fs_force_buffered_io(inode, iocb, iter))
3631                return 0;
3632
3633        do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
3634
3635        trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3636
3637        if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3638                iocb->ki_hint = WRITE_LIFE_NOT_SET;
3639
3640        if (iocb->ki_flags & IOCB_NOWAIT) {
3641                if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3642                        iocb->ki_hint = hint;
3643                        err = -EAGAIN;
3644                        goto out;
3645                }
3646                if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3647                        up_read(&fi->i_gc_rwsem[rw]);
3648                        iocb->ki_hint = hint;
3649                        err = -EAGAIN;
3650                        goto out;
3651                }
3652        } else {
3653                down_read(&fi->i_gc_rwsem[rw]);
3654                if (do_opu)
3655                        down_read(&fi->i_gc_rwsem[READ]);
3656        }
3657
3658        err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3659                        iter, rw == WRITE ? get_data_block_dio_write :
3660                        get_data_block_dio, NULL, f2fs_dio_submit_bio,
3661                        rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3662                        DIO_SKIP_HOLES);
3663
3664        if (do_opu)
3665                up_read(&fi->i_gc_rwsem[READ]);
3666
3667        up_read(&fi->i_gc_rwsem[rw]);
3668
3669        if (rw == WRITE) {
3670                if (whint_mode == WHINT_MODE_OFF)
3671                        iocb->ki_hint = hint;
3672                if (err > 0) {
3673                        f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3674                                                                        err);
3675                        if (!do_opu)
3676                                set_inode_flag(inode, FI_UPDATE_WRITE);
3677                } else if (err == -EIOCBQUEUED) {
3678                        f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3679                                                count - iov_iter_count(iter));
3680                } else if (err < 0) {
3681                        f2fs_write_failed(inode, offset + count);
3682                }
3683        } else {
3684                if (err > 0)
3685                        f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3686                else if (err == -EIOCBQUEUED)
3687                        f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3688                                                count - iov_iter_count(iter));
3689        }
3690
3691out:
3692        trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3693
3694        return err;
3695}
3696
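    /*
     * .invalidatepage: fix up the dirty-page counters, clear GC and
     * compress-cache state, drop atomic-write pages from the inmem list,
     * and detach the page's private data.
     */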
3697void f2fs_invalidate_page(struct page *page, unsigned int offset,
3698                                                        unsigned int length)
3699{
3700        struct inode *inode = page->mapping->host;
3701        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3702
3703        if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3704                (offset % PAGE_SIZE || length != PAGE_SIZE))
3705                return;
3706
3707        if (PageDirty(page)) {
3708                if (inode->i_ino == F2FS_META_INO(sbi)) {
3709                        dec_page_count(sbi, F2FS_DIRTY_META);
3710                } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3711                        dec_page_count(sbi, F2FS_DIRTY_NODES);
3712                } else {
3713                        inode_dec_dirty_pages(inode);
3714                        f2fs_remove_dirty_inode(inode);
3715                }
3716        }
3717
3718        clear_page_private_gcing(page);
3719
3720        if (test_opt(sbi, COMPRESS_CACHE)) {
3721                if (f2fs_compressed_file(inode))
3722                        f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3723                if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3724                        clear_page_private_data(page);
3725        }
3726
3727        if (page_private_atomic(page))
3728                return f2fs_drop_inmem_page(inode, page);
3729
3730        detach_page_private(page);
3731        set_page_private(page, 0);
3732}
3733
3734int f2fs_release_page(struct page *page, gfp_t wait)
3735{
3736        /* If this is a dirty page, keep PagePrivate */
3737        if (PageDirty(page))
3738                return 0;
3739
3740        /* This is an atomic written page, keep PagePrivate */
3741        if (page_private_atomic(page))
3742                return 0;
3743
3744        if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
3745                struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3746                struct inode *inode = page->mapping->host;
3747
3748                if (f2fs_compressed_file(inode))
3749                        f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3750                if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3751                        clear_page_private_data(page);
3752        }
3753
3754        clear_page_private_gcing(page);
3755
3756        detach_page_private(page);
3757        set_page_private(page, 0);
3758        return 1;
3759}
3760
3761static int f2fs_set_data_page_dirty(struct page *page)
3762{
3763        struct inode *inode = page_file_mapping(page)->host;
3764
3765        trace_f2fs_set_page_dirty(page, DATA);
3766
3767        if (!PageUptodate(page))
3768                SetPageUptodate(page);
3769        if (PageSwapCache(page))
3770                return __set_page_dirty_nobuffers(page);
3771
3772        if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3773                if (!page_private_atomic(page)) {
3774                        f2fs_register_inmem_page(inode, page);
3775                        return 1;
3776                }
3777                /*
3778                 * This page has already been registered, so we just
3779                 * return here.
3780                 */
3781                return 0;
3782        }
3783
3784        if (!PageDirty(page)) {
3785                __set_page_dirty_nobuffers(page);
3786                f2fs_update_dirty_page(inode, page);
3787                return 1;
3788        }
3789        return 0;
3790}
3791
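    /*
     * Resolve the on-disk block number of a logical block in a compressed
     * file.  Returns 0 when the cluster is stored compressed or the block
     * address is invalid, since such blocks have no 1:1 disk mapping.
     */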
3793static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3794{
3795#ifdef CONFIG_F2FS_FS_COMPRESSION
3796        struct dnode_of_data dn;
3797        sector_t start_idx, blknr = 0;
3798        int ret;
3799
3800        start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3801
3802        set_new_dnode(&dn, inode, NULL, NULL, 0);
3803        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3804        if (ret)
3805                return 0;
3806
3807        if (dn.data_blkaddr != COMPRESS_ADDR) {
3808                dn.ofs_in_node += block - start_idx;
3809                blknr = f2fs_data_blkaddr(&dn);
3810                if (!__is_valid_data_blkaddr(blknr))
3811                        blknr = 0;
3812        }
3813
3814        f2fs_put_dnode(&dn);
3815        return blknr;
3816#else
3817        return 0;
3818#endif
3819}
3820
3822static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3823{
3824        struct inode *inode = mapping->host;
3825        sector_t blknr = 0;
3826
3827        if (f2fs_has_inline_data(inode))
3828                goto out;
3829
3830        /* make sure all the blocks are allocated */
3831        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3832                filemap_write_and_wait(mapping);
3833
3834        /* Block number less than F2FS MAX BLOCKS */
3835        if (unlikely(block >= max_file_blocks(inode)))
3836                goto out;
3837
3838        if (f2fs_compressed_file(inode)) {
3839                blknr = f2fs_bmap_compress(inode, block);
3840        } else {
3841                struct f2fs_map_blocks map;
3842
3843                memset(&map, 0, sizeof(map));
3844                map.m_lblk = block;
3845                map.m_len = 1;
3846                map.m_next_pgofs = NULL;
3847                map.m_seg_type = NO_CHECK_TYPE;
3848
3849                if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3850                        blknr = map.m_pblk;
3851        }
3852out:
3853        trace_f2fs_bmap(inode, block, blknr);
3854        return blknr;
3855}
3856
3857#ifdef CONFIG_MIGRATION
3858#include <linux/migrate.h>
3859
3860int f2fs_migrate_page(struct address_space *mapping,
3861                struct page *newpage, struct page *page, enum migrate_mode mode)
3862{
3863        int rc, extra_count;
3864        struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3865        bool atomic_written = page_private_atomic(page);
3866
3867        BUG_ON(PageWriteback(page));
3868
3869        /* migrating an atomic written page is safe with the inmem_lock held */
3870        if (atomic_written) {
3871                if (mode != MIGRATE_SYNC)
3872                        return -EBUSY;
3873                if (!mutex_trylock(&fi->inmem_lock))
3874                        return -EAGAIN;
3875        }
3876
3877        /* one extra reference was held for the atomic_write page */
3878        extra_count = atomic_written ? 1 : 0;
3879        rc = migrate_page_move_mapping(mapping, newpage,
3880                                page, extra_count);
3881        if (rc != MIGRATEPAGE_SUCCESS) {
3882                if (atomic_written)
3883                        mutex_unlock(&fi->inmem_lock);
3884                return rc;
3885        }
3886
3887        if (atomic_written) {
3888                struct inmem_pages *cur;
3889
3890                list_for_each_entry(cur, &fi->inmem_pages, list)
3891                        if (cur->page == page) {
3892                                cur->page = newpage;
3893                                break;
3894                        }
3895                mutex_unlock(&fi->inmem_lock);
3896                put_page(page);
3897                get_page(newpage);
3898        }
3899
3900        /* make sure we start with no stale private field */
3901        set_page_private(newpage, 0);
3902        if (PagePrivate(page)) {
3903                set_page_private(newpage, page_private(page));
3904                SetPagePrivate(newpage);
3905                get_page(newpage);
3906
3907                set_page_private(page, 0);
3908                ClearPagePrivate(page);
3909                put_page(page);
3910        }
3911
3912        if (mode != MIGRATE_SYNC_NO_COPY)
3913                migrate_page_copy(newpage, page);
3914        else
3915                migrate_page_states(newpage, page);
3916
3917        return MIGRATEPAGE_SUCCESS;
3918}
3919#endif
3920
3921#ifdef CONFIG_SWAP
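    /*
     * Rewrite @blkcnt blocks starting at @start_blk into newly allocated
     * pinned sections by dirtying and flushing them, so that a swapfile's
     * extents become aligned to section boundaries.
     */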
3922static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3923                                                        unsigned int blkcnt)
3924{
3925        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3926        unsigned int blkofs;
3927        unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3928        unsigned int secidx = start_blk / blk_per_sec;
3929        unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3930        int ret = 0;
3931
3932        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3933        filemap_invalidate_lock(inode->i_mapping);
3934
3935        set_inode_flag(inode, FI_ALIGNED_WRITE);
3936
3937        for (; secidx < end_sec; secidx++) {
3938                down_write(&sbi->pin_sem);
3939
3940                f2fs_lock_op(sbi);
3941                f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3942                f2fs_unlock_op(sbi);
3943
3944                set_inode_flag(inode, FI_DO_DEFRAG);
3945
3946                for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
3947                        struct page *page;
3948                        unsigned int blkidx = secidx * blk_per_sec + blkofs;
3949
3950                        page = f2fs_get_lock_data_page(inode, blkidx, true);
3951                        if (IS_ERR(page)) {
3952                                up_write(&sbi->pin_sem);
3953                                ret = PTR_ERR(page);
3954                                goto done;
3955                        }
3956
3957                        set_page_dirty(page);
3958                        f2fs_put_page(page, 1);
3959                }
3960
3961                clear_inode_flag(inode, FI_DO_DEFRAG);
3962
3963                ret = filemap_fdatawrite(inode->i_mapping);
3964
3965                up_write(&sbi->pin_sem);
3966
3967                if (ret)
3968                        break;
3969        }
3970
3971done:
3972        clear_inode_flag(inode, FI_DO_DEFRAG);
3973        clear_inode_flag(inode, FI_ALIGNED_WRITE);
3974
3975        filemap_invalidate_unlock(inode->i_mapping);
3976        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3977
3978        return ret;
3979}
3980
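    /*
     * Walk the swapfile's mapped extents and register them with
     * add_swap_extent().  Extents that are not aligned to a section are
     * first rewritten via f2fs_migrate_blocks(); a hole anywhere in the
     * file fails activation with -EINVAL.
     */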
3981static int check_swap_activate(struct swap_info_struct *sis,
3982                                struct file *swap_file, sector_t *span)
3983{
3984        struct address_space *mapping = swap_file->f_mapping;
3985        struct inode *inode = mapping->host;
3986        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3987        sector_t cur_lblock;
3988        sector_t last_lblock;
3989        sector_t pblock;
3990        sector_t lowest_pblock = -1;
3991        sector_t highest_pblock = 0;
3992        int nr_extents = 0;
3993        unsigned long nr_pblocks;
3994        unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3995        unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
3996        unsigned int not_aligned = 0;
3997        int ret = 0;
3998
3999        /*
4000         * Map all the blocks into the extent list.  This code doesn't try
4001         * to be very smart.
4002         */
4003        cur_lblock = 0;
4004        last_lblock = bytes_to_blks(inode, i_size_read(inode));
4005
4006        while (cur_lblock < last_lblock && cur_lblock < sis->max) {
4007                struct f2fs_map_blocks map;
4008retry:
4009                cond_resched();
4010
4011                memset(&map, 0, sizeof(map));
4012                map.m_lblk = cur_lblock;
4013                map.m_len = last_lblock - cur_lblock;
4014                map.m_next_pgofs = NULL;
4015                map.m_next_extent = NULL;
4016                map.m_seg_type = NO_CHECK_TYPE;
4017                map.m_may_create = false;
4018
4019                ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
4020                if (ret)
4021                        goto out;
4022
4023                /* hole */
4024                if (!(map.m_flags & F2FS_MAP_FLAGS)) {
4025                        f2fs_err(sbi, "Swapfile has holes");
4026                        ret = -EINVAL;
4027                        goto out;
4028                }
4029
4030                pblock = map.m_pblk;
4031                nr_pblocks = map.m_len;
4032
4033                if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
4034                                nr_pblocks & sec_blks_mask) {
4035                        not_aligned++;
4036
4037                        nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4038                        if (cur_lblock + nr_pblocks > sis->max)
4039                                nr_pblocks -= blks_per_sec;
4040
4041                        if (!nr_pblocks) {
4042                                /* this extent is the last one */
4043                                nr_pblocks = map.m_len;
4044                                f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
4045                                goto next;
4046                        }
4047
4048                        ret = f2fs_migrate_blocks(inode, cur_lblock,
4049                                                        nr_pblocks);
4050                        if (ret)
4051                                goto out;
4052                        goto retry;
4053                }
4054next:
4055                if (cur_lblock + nr_pblocks >= sis->max)
4056                        nr_pblocks = sis->max - cur_lblock;
4057
4058                if (cur_lblock) {       /* exclude the header page */
4059                        if (pblock < lowest_pblock)
4060                                lowest_pblock = pblock;
4061                        if (pblock + nr_pblocks - 1 > highest_pblock)
4062                                highest_pblock = pblock + nr_pblocks - 1;
4063                }
4064
4065                /*
4066                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4067                 */
4068                ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4069                if (ret < 0)
4070                        goto out;
4071                nr_extents += ret;
4072                cur_lblock += nr_pblocks;
4073        }
4074        ret = nr_extents;
4075        *span = 1 + highest_pblock - lowest_pblock;
4076        if (cur_lblock == 0)
4077                cur_lblock = 1; /* force Empty message */
4078        sis->max = cur_lblock;
4079        sis->pages = cur_lblock - 1;
4080        sis->highest_bit = cur_lblock - 1;
4081out:
4082        if (not_aligned)
4083                f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
4084                          not_aligned, blks_per_sec * F2FS_BLKSIZE);
4085        return ret;
4086}
4087
4088static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4089                                sector_t *span)
4090{
4091        struct inode *inode = file_inode(file);
4092        int ret;
4093
4094        if (!S_ISREG(inode->i_mode))
4095                return -EINVAL;
4096
4097        if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4098                return -EROFS;
4099
4100        if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
4101                f2fs_err(F2FS_I_SB(inode),
4102                        "Swapfile not supported in LFS mode");
4103                return -EINVAL;
4104        }
4105
4106        ret = f2fs_convert_inline_inode(inode);
4107        if (ret)
4108                return ret;
4109
4110        if (!f2fs_disable_compressed_file(inode))
4111                return -EINVAL;
4112
4113        f2fs_precache_extents(inode);
4114
4115        ret = check_swap_activate(sis, file, span);
4116        if (ret < 0)
4117                return ret;
4118
4119        set_inode_flag(inode, FI_PIN_FILE);
4120        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4121        return ret;
4122}
4123
4124static void f2fs_swap_deactivate(struct file *file)
4125{
4126        struct inode *inode = file_inode(file);
4127
4128        clear_inode_flag(inode, FI_PIN_FILE);
4129}
4130#else
4131static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4132                                sector_t *span)
4133{
4134        return -EOPNOTSUPP;
4135}
4136
4137static void f2fs_swap_deactivate(struct file *file)
4138{
4139}
4140#endif
4141
4142const struct address_space_operations f2fs_dblock_aops = {
4143        .readpage       = f2fs_read_data_page,
4144        .readahead      = f2fs_readahead,
4145        .writepage      = f2fs_write_data_page,
4146        .writepages     = f2fs_write_data_pages,
4147        .write_begin    = f2fs_write_begin,
4148        .write_end      = f2fs_write_end,
4149        .set_page_dirty = f2fs_set_data_page_dirty,
4150        .invalidatepage = f2fs_invalidate_page,
4151        .releasepage    = f2fs_release_page,
4152        .direct_IO      = f2fs_direct_IO,
4153        .bmap           = f2fs_bmap,
4154        .swap_activate  = f2fs_swap_activate,
4155        .swap_deactivate = f2fs_swap_deactivate,
4156#ifdef CONFIG_MIGRATION
4157        .migratepage    = f2fs_migrate_page,
4158#endif
4159};
4160
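    /*
     * Clear PAGECACHE_TAG_DIRTY for @page in its mapping's xarray under
     * the i_pages lock, without touching the page's own dirty flag.
     */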
4161void f2fs_clear_page_cache_dirty_tag(struct page *page)
4162{
4163        struct address_space *mapping = page_mapping(page);
4164        unsigned long flags;
4165
4166        xa_lock_irqsave(&mapping->i_pages, flags);
4167        __xa_clear_mark(&mapping->i_pages, page_index(page),
4168                                                PAGECACHE_TAG_DIRTY);
4169        xa_unlock_irqrestore(&mapping->i_pages, flags);
4170}
4171
4172int __init f2fs_init_post_read_processing(void)
4173{
4174        bio_post_read_ctx_cache =
4175                kmem_cache_create("f2fs_bio_post_read_ctx",
4176                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4177        if (!bio_post_read_ctx_cache)
4178                goto fail;
4179        bio_post_read_ctx_pool =
4180                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4181                                         bio_post_read_ctx_cache);
4182        if (!bio_post_read_ctx_pool)
4183                goto fail_free_cache;
4184        return 0;
4185
4186fail_free_cache:
4187        kmem_cache_destroy(bio_post_read_ctx_cache);
4188fail:
4189        return -ENOMEM;
4190}
4191
4192void f2fs_destroy_post_read_processing(void)
4193{
4194        mempool_destroy(bio_post_read_ctx_pool);
4195        kmem_cache_destroy(bio_post_read_ctx_cache);
4196}
4197
4198int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4199{
4200        if (!f2fs_sb_has_encrypt(sbi) &&
4201                !f2fs_sb_has_verity(sbi) &&
4202                !f2fs_sb_has_compression(sbi))
4203                return 0;
4204
4205        sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4206                                                 WQ_UNBOUND | WQ_HIGHPRI,
4207                                                 num_online_cpus());
4208        if (!sbi->post_read_wq)
4209                return -ENOMEM;
4210        return 0;
4211}
4212
4213void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4214{
4215        if (sbi->post_read_wq)
4216                destroy_workqueue(sbi->post_read_wq);
4217}
4218
4219int __init f2fs_init_bio_entry_cache(void)
4220{
4221        bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4222                        sizeof(struct bio_entry));
4223        if (!bio_entry_slab)
4224                return -ENOMEM;
4225        return 0;
4226}
4227
4228void f2fs_destroy_bio_entry_cache(void)
4229{
4230        kmem_cache_destroy(bio_entry_slab);
4231}
4232