// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}
51
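/*
 * Return true if writeback of this page must complete before the ongoing
 * checkpoint can finish: meta/node pages, directory data, atomic or quota
 * file data, and cold data pages.  Compressed control pages are excluded.
 */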
52static bool __is_cp_guaranteed(struct page *page)
53{
54 struct address_space *mapping = page->mapping;
55 struct inode *inode;
56 struct f2fs_sb_info *sbi;
57
58 if (!mapping)
59 return false;
60
61 if (f2fs_is_compressed_page(page))
62 return false;
63
64 inode = mapping->host;
65 sbi = F2FS_I_SB(inode);
66
67 if (inode->i_ino == F2FS_META_INO(sbi) ||
68 inode->i_ino == F2FS_NODE_INO(sbi) ||
69 S_ISDIR(inode->i_mode) ||
70 (S_ISREG(inode->i_mode) &&
71 (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
72 is_cold_data(page))
73 return true;
74 return false;
75}
76
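/* Pick the read-IO counter (F2FS_RD_META/NODE/DATA) this page is accounted under. */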
77static enum count_type __read_io_type(struct page *page)
78{
79 struct address_space *mapping = page_file_mapping(page);
80
81 if (mapping) {
82 struct inode *inode = mapping->host;
83 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
84
85 if (inode->i_ino == F2FS_META_INO(sbi))
86 return F2FS_RD_META;
87
88 if (inode->i_ino == F2FS_NODE_INO(sbi))
89 return F2FS_RD_NODE;
90 }
91 return F2FS_RD_DATA;
92}
93
/* postprocessing steps for read bios */
95enum bio_post_read_step {
96#ifdef CONFIG_FS_ENCRYPTION
97 STEP_DECRYPT = 1 << 0,
98#else
99 STEP_DECRYPT = 0,
100#endif
101#ifdef CONFIG_F2FS_FS_COMPRESSION
102 STEP_DECOMPRESS = 1 << 1,
103#else
104 STEP_DECOMPRESS = 0,
105#endif
106#ifdef CONFIG_FS_VERITY
107 STEP_VERITY = 1 << 2,
108#else
109 STEP_VERITY = 0,
110#endif
111};
112
113struct bio_post_read_ctx {
114 struct bio *bio;
115 struct f2fs_sb_info *sbi;
116 struct work_struct work;
117 unsigned int enabled_steps;
118};
119
120static void f2fs_finish_read_bio(struct bio *bio)
121{
122 struct bio_vec *bv;
123 struct bvec_iter_all iter_all;
124
	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
129 bio_for_each_segment_all(bv, bio, iter_all) {
130 struct page *page = bv->bv_page;
131
132 if (f2fs_is_compressed_page(page)) {
133 if (bio->bi_status)
134 f2fs_end_read_compressed_page(page, true);
135 f2fs_put_page_dic(page);
136 continue;
137 }
138
		/* PG_error was set if any post_read step failed */
140 if (bio->bi_status || PageError(page)) {
141 ClearPageUptodate(page);
			/* will re-read again later */
143 ClearPageError(page);
144 } else {
145 SetPageUptodate(page);
146 }
147 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
148 unlock_page(page);
149 }
150
151 if (bio->bi_private)
152 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
153 bio_put(bio);
154}
155
156static void f2fs_verify_bio(struct work_struct *work)
157{
158 struct bio_post_read_ctx *ctx =
159 container_of(work, struct bio_post_read_ctx, work);
160 struct bio *bio = ctx->bio;
161 bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
162
	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first.  This assumes that verity is the last post-read step.
	 */
170 mempool_free(ctx, bio_post_read_ctx_pool);
171 bio->bi_private = NULL;
172
	/*
	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
177 if (may_have_compressed_pages) {
178 struct bio_vec *bv;
179 struct bvec_iter_all iter_all;
180
181 bio_for_each_segment_all(bv, bio, iter_all) {
182 struct page *page = bv->bv_page;
183
184 if (!f2fs_is_compressed_page(page) &&
185 !PageError(page) && !fsverity_verify_page(page))
186 SetPageError(page);
187 }
188 } else {
189 fsverity_verify_bio(bio);
190 }
191
192 f2fs_finish_read_bio(bio);
193}
194
/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data
 * pages can involve reading verity metadata pages from the file, and these
 * verity metadata pages may be encrypted and/or compressed.
 */
204static void f2fs_verify_and_finish_bio(struct bio *bio)
205{
206 struct bio_post_read_ctx *ctx = bio->bi_private;
207
208 if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
209 INIT_WORK(&ctx->work, f2fs_verify_bio);
210 fsverity_enqueue_verify_work(&ctx->work);
211 } else {
212 f2fs_finish_read_bio(bio);
213 }
214}
215
/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
225static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
226{
227 struct bio_vec *bv;
228 struct bvec_iter_all iter_all;
229 bool all_compressed = true;
230
231 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
232 struct page *page = bv->bv_page;
233
		/* PG_error was set if decryption failed. */
235 if (f2fs_is_compressed_page(page))
236 f2fs_end_read_compressed_page(page, PageError(page));
237 else
238 all_compressed = false;
239 }
240
	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
246 if (all_compressed)
247 ctx->enabled_steps &= ~STEP_VERITY;
248}
249
250static void f2fs_post_read_work(struct work_struct *work)
251{
252 struct bio_post_read_ctx *ctx =
253 container_of(work, struct bio_post_read_ctx, work);
254
255 if (ctx->enabled_steps & STEP_DECRYPT)
256 fscrypt_decrypt_bio(ctx->bio);
257
258 if (ctx->enabled_steps & STEP_DECOMPRESS)
259 f2fs_handle_step_decompress(ctx);
260
261 f2fs_verify_and_finish_bio(ctx->bio);
262}
263
264static void f2fs_read_end_io(struct bio *bio)
265{
266 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
267 struct bio_post_read_ctx *ctx = bio->bi_private;
268
269 if (time_to_inject(sbi, FAULT_READ_IO)) {
270 f2fs_show_injection_info(sbi, FAULT_READ_IO);
271 bio->bi_status = BLK_STS_IOERR;
272 }
273
274 if (bio->bi_status) {
275 f2fs_finish_read_bio(bio);
276 return;
277 }
278
279 if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
280 INIT_WORK(&ctx->work, f2fs_post_read_work);
281 queue_work(ctx->sbi->post_read_wq, &ctx->work);
282 } else {
283 f2fs_verify_and_finish_bio(bio);
284 }
285}
286
287static void f2fs_write_end_io(struct bio *bio)
288{
289 struct f2fs_sb_info *sbi = bio->bi_private;
290 struct bio_vec *bvec;
291 struct bvec_iter_all iter_all;
292
293 if (time_to_inject(sbi, FAULT_WRITE_IO)) {
294 f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
295 bio->bi_status = BLK_STS_IOERR;
296 }
297
298 bio_for_each_segment_all(bvec, bio, iter_all) {
299 struct page *page = bvec->bv_page;
300 enum count_type type = WB_DATA_TYPE(page);
301
302 if (IS_DUMMY_WRITTEN_PAGE(page)) {
303 set_page_private(page, (unsigned long)NULL);
304 ClearPagePrivate(page);
305 unlock_page(page);
306 mempool_free(page, sbi->write_io_dummy);
307
308 if (unlikely(bio->bi_status))
309 f2fs_stop_checkpoint(sbi, true);
310 continue;
311 }
312
313 fscrypt_finalize_bounce_page(&page);
314
315#ifdef CONFIG_F2FS_FS_COMPRESSION
316 if (f2fs_is_compressed_page(page)) {
317 f2fs_compress_write_end_io(bio, page);
318 continue;
319 }
320#endif
321
322 if (unlikely(bio->bi_status)) {
323 mapping_set_error(page->mapping, -EIO);
324 if (type == F2FS_WB_CP_DATA)
325 f2fs_stop_checkpoint(sbi, true);
326 }
327
328 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
329 page->index != nid_of_node(page));
330
331 dec_page_count(sbi, type);
332 if (f2fs_in_warm_node_list(sbi, page))
333 f2fs_del_fsync_node_entry(sbi, page);
334 clear_cold_data(page);
335 end_page_writeback(page);
336 }
337 if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
338 wq_has_sleeper(&sbi->cp_wait))
339 wake_up(&sbi->cp_wait);
340
341 bio_put(bio);
342}
343
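/*
 * Map @blk_addr to the block device that backs it on multi-device
 * filesystems; if @bio is given, also point it at that device and sector.
 */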
344struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
345 block_t blk_addr, struct bio *bio)
346{
347 struct block_device *bdev = sbi->sb->s_bdev;
348 int i;
349
350 if (f2fs_is_multi_device(sbi)) {
351 for (i = 0; i < sbi->s_ndevs; i++) {
352 if (FDEV(i).start_blk <= blk_addr &&
353 FDEV(i).end_blk >= blk_addr) {
354 blk_addr -= FDEV(i).start_blk;
355 bdev = FDEV(i).bdev;
356 break;
357 }
358 }
359 }
360 if (bio) {
361 bio_set_dev(bio, bdev);
362 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
363 }
364 return bdev;
365}
366
367int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
368{
369 int i;
370
371 if (!f2fs_is_multi_device(sbi))
372 return 0;
373
374 for (i = 0; i < sbi->s_ndevs; i++)
375 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
376 return i;
377 return 0;
378}
379
380static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
381{
382 struct f2fs_sb_info *sbi = fio->sbi;
383 struct bio *bio;
384
385 bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
386
387 f2fs_target_device(sbi, fio->new_blkaddr, bio);
388 if (is_read_io(fio->op)) {
389 bio->bi_end_io = f2fs_read_end_io;
390 bio->bi_private = NULL;
391 } else {
392 bio->bi_end_io = f2fs_write_end_io;
393 bio->bi_private = sbi;
394 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
395 fio->type, fio->temp);
396 }
397 if (fio->io_wbc)
398 wbc_init_bio(fio->io_wbc, bio);
399
400 return bio;
401}
402
403static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
404 pgoff_t first_idx,
405 const struct f2fs_io_info *fio,
406 gfp_t gfp_mask)
407{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
412 if (!fio || !fio->encrypted_page)
413 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
414}
415
416static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
417 pgoff_t next_idx,
418 const struct f2fs_io_info *fio)
419{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
424 if (fio && fio->encrypted_page)
425 return !bio_has_crypt_ctx(bio);
426
427 return fscrypt_mergeable_bio(bio, inode, next_idx);
428}
429
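/*
 * Submit a bio.  For DATA/NODE writes with IO alignment enabled, first pad a
 * partially filled bio with locked dummy pages so the write stays aligned to
 * F2FS_IO_SIZE(sbi); read bios are submitted as-is.
 */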
430static inline void __submit_bio(struct f2fs_sb_info *sbi,
431 struct bio *bio, enum page_type type)
432{
433 if (!is_read_io(bio_op(bio))) {
434 unsigned int start;
435
436 if (type != DATA && type != NODE)
437 goto submit_io;
438
439 if (f2fs_lfs_mode(sbi) && current->plug)
440 blk_finish_plug(current->plug);
441
442 if (!F2FS_IO_ALIGNED(sbi))
443 goto submit_io;
444
445 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
446 start %= F2FS_IO_SIZE(sbi);
447
448 if (start == 0)
449 goto submit_io;
450
		/* fill dummy pages */
452 for (; start < F2FS_IO_SIZE(sbi); start++) {
453 struct page *page =
454 mempool_alloc(sbi->write_io_dummy,
455 GFP_NOIO | __GFP_NOFAIL);
456 f2fs_bug_on(sbi, !page);
457
458 zero_user_segment(page, 0, PAGE_SIZE);
459 SetPagePrivate(page);
460 set_page_private(page, DUMMY_WRITTEN_PAGE);
461 lock_page(page);
462 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
463 f2fs_bug_on(sbi, 1);
464 }
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
469 if (type == NODE)
470 set_sbi_flag(sbi, SBI_NEED_CP);
471 }
472submit_io:
473 if (is_read_io(bio_op(bio)))
474 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
475 else
476 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
477 submit_bio(bio);
478}
479
480void f2fs_submit_bio(struct f2fs_sb_info *sbi,
481 struct bio *bio, enum page_type type)
482{
483 __submit_bio(sbi, bio, type);
484}
485
486static void __attach_io_flag(struct f2fs_io_info *fio)
487{
488 struct f2fs_sb_info *sbi = fio->sbi;
489 unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
490 unsigned int io_flag, fua_flag, meta_flag;
491
492 if (fio->type == DATA)
493 io_flag = sbi->data_io_flag;
494 else if (fio->type == NODE)
495 io_flag = sbi->node_io_flag;
496 else
497 return;
498
499 fua_flag = io_flag & temp_mask;
500 meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
501
	/*
	 * sbi->data_io_flag and sbi->node_io_flag each pack two bitmaps,
	 * one bit per temperature (HOT/WARM/COLD): the low NR_TEMP_TYPE
	 * bits request REQ_FUA and the next NR_TEMP_TYPE bits request
	 * REQ_META for writes of that temperature.
	 */
508 if ((1 << fio->temp) & meta_flag)
509 fio->op_flags |= REQ_META;
510 if ((1 << fio->temp) & fua_flag)
511 fio->op_flags |= REQ_FUA;
512}
513
514static void __submit_merged_bio(struct f2fs_bio_info *io)
515{
516 struct f2fs_io_info *fio = &io->fio;
517
518 if (!io->bio)
519 return;
520
521 __attach_io_flag(fio);
522 bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
523
524 if (is_read_io(fio->op))
525 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
526 else
527 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
528
529 __submit_bio(io->sbi, io->bio, fio->type);
530 io->bio = NULL;
531}
532
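/*
 * Return true if @bio already carries a page that belongs to @inode, equals
 * @page, or is owned by node ino @ino (bounce and compress control pages are
 * mapped back to their pagecache pages before the comparison).
 */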
533static bool __has_merged_page(struct bio *bio, struct inode *inode,
534 struct page *page, nid_t ino)
535{
536 struct bio_vec *bvec;
537 struct bvec_iter_all iter_all;
538
539 if (!bio)
540 return false;
541
542 if (!inode && !page && !ino)
543 return true;
544
545 bio_for_each_segment_all(bvec, bio, iter_all) {
546 struct page *target = bvec->bv_page;
547
548 if (fscrypt_is_bounce_page(target)) {
549 target = fscrypt_pagecache_page(target);
550 if (IS_ERR(target))
551 continue;
552 }
553 if (f2fs_is_compressed_page(target)) {
554 target = f2fs_compress_control_page(target);
555 if (IS_ERR(target))
556 continue;
557 }
558
559 if (inode && inode == target->mapping->host)
560 return true;
561 if (page && page == target)
562 return true;
563 if (ino && ino == ino_of_node(target))
564 return true;
565 }
566
567 return false;
568}
569
570static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
571 enum page_type type, enum temp_type temp)
572{
573 enum page_type btype = PAGE_TYPE_OF_BIO(type);
574 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
575
576 down_write(&io->io_rwsem);
577
	/* change META to META_FLUSH in the checkpoint procedure */
579 if (type >= META_FLUSH) {
580 io->fio.type = META_FLUSH;
581 io->fio.op = REQ_OP_WRITE;
582 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
583 if (!test_opt(sbi, NOBARRIER))
584 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
585 }
586 __submit_merged_bio(io);
587 up_write(&io->io_rwsem);
588}
589
590static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
591 struct inode *inode, struct page *page,
592 nid_t ino, enum page_type type, bool force)
593{
594 enum temp_type temp;
595 bool ret = true;
596
597 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
598 if (!force) {
599 enum page_type btype = PAGE_TYPE_OF_BIO(type);
600 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
601
602 down_read(&io->io_rwsem);
603 ret = __has_merged_page(io->bio, inode, page, ino);
604 up_read(&io->io_rwsem);
605 }
606 if (ret)
607 __f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
610 if (type >= META)
611 break;
612 }
613}
614
615void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
616{
617 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
618}
619
620void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
621 struct inode *inode, struct page *page,
622 nid_t ino, enum page_type type)
623{
624 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
625}
626
627void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
628{
629 f2fs_submit_merged_write(sbi, DATA);
630 f2fs_submit_merged_write(sbi, NODE);
631 f2fs_submit_merged_write(sbi, META);
632}
633
/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
638int f2fs_submit_page_bio(struct f2fs_io_info *fio)
639{
640 struct bio *bio;
641 struct page *page = fio->encrypted_page ?
642 fio->encrypted_page : fio->page;
643
644 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
645 fio->is_por ? META_POR : (__is_meta_io(fio) ?
646 META_GENERIC : DATA_GENERIC_ENHANCE)))
647 return -EFSCORRUPTED;
648
649 trace_f2fs_submit_page_bio(page, fio);
650
	/* allocate a new bio for this single page */
652 bio = __bio_alloc(fio, 1);
653
654 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
655 fio->page->index, fio, GFP_NOIO);
656
657 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
658 bio_put(bio);
659 return -EFAULT;
660 }
661
662 if (fio->io_wbc && !is_read_io(fio->op))
663 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
664
665 __attach_io_flag(fio);
666 bio_set_op_attrs(bio, fio->op, fio->op_flags);
667
668 inc_page_count(fio->sbi, is_read_io(fio->op) ?
669 __read_io_type(page): WB_DATA_TYPE(fio->page));
670
671 __submit_bio(fio->sbi, bio, fio->type);
672 return 0;
673}
674
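/*
 * A block can be merged into an existing bio only if it is physically
 * contiguous with the last block written, lives on the same target device,
 * and the bio has not yet grown past sbi->max_io_bytes.
 */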
675static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
676 block_t last_blkaddr, block_t cur_blkaddr)
677{
678 if (unlikely(sbi->max_io_bytes &&
679 bio->bi_iter.bi_size >= sbi->max_io_bytes))
680 return false;
681 if (last_blkaddr + 1 != cur_blkaddr)
682 return false;
683 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
684}
685
686static bool io_type_is_mergeable(struct f2fs_bio_info *io,
687 struct f2fs_io_info *fio)
688{
689 if (io->fio.op != fio->op)
690 return false;
691 return io->fio.op_flags == fio->op_flags;
692}
693
694static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
695 struct f2fs_bio_info *io,
696 struct f2fs_io_info *fio,
697 block_t last_blkaddr,
698 block_t cur_blkaddr)
699{
700 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
701 unsigned int filled_blocks =
702 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
703 unsigned int io_size = F2FS_IO_SIZE(sbi);
704 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
705
		/* the bio is aligned but lacks room for another full IO unit */
707 if (!(filled_blocks % io_size) && left_vecs < io_size)
708 return false;
709 }
710 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
711 return false;
712 return io_type_is_mergeable(io, fio);
713}
714
715static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
716 struct page *page, enum temp_type temp)
717{
718 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
719 struct bio_entry *be;
720
721 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
722 be->bio = bio;
723 bio_get(bio);
724
725 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
726 f2fs_bug_on(sbi, 1);
727
728 down_write(&io->bio_list_lock);
729 list_add_tail(&be->list, &io->bio_list);
730 up_write(&io->bio_list_lock);
731}
732
733static void del_bio_entry(struct bio_entry *be)
734{
735 list_del(&be->list);
736 kmem_cache_free(bio_entry_slab, be);
737}
738
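/*
 * Try to add an in-place-update page to the cached bio tracked in the
 * per-temperature DATA bio lists.  If it cannot be merged (crypto context
 * mismatch or bio full), the cached bio is submitted and -EAGAIN is returned
 * so that the caller allocates a fresh bio.
 */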
739static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
740 struct page *page)
741{
742 struct f2fs_sb_info *sbi = fio->sbi;
743 enum temp_type temp;
744 bool found = false;
745 int ret = -EAGAIN;
746
747 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
748 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
749 struct list_head *head = &io->bio_list;
750 struct bio_entry *be;
751
752 down_write(&io->bio_list_lock);
753 list_for_each_entry(be, head, list) {
754 if (be->bio != *bio)
755 continue;
756
757 found = true;
758
759 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
760 *fio->last_block,
761 fio->new_blkaddr));
762 if (f2fs_crypt_mergeable_bio(*bio,
763 fio->page->mapping->host,
764 fio->page->index, fio) &&
765 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
766 PAGE_SIZE) {
767 ret = 0;
768 break;
769 }
			/* the page can't be merged into this bio,
			 * so submit the bio and drop the entry */
772 del_bio_entry(be);
773 __submit_bio(sbi, *bio, DATA);
774 break;
775 }
776 up_write(&io->bio_list_lock);
777 }
778
779 if (ret) {
780 bio_put(*bio);
781 *bio = NULL;
782 }
783
784 return ret;
785}
786
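/*
 * Find the cached IPU bio to flush: either *bio itself or, if @page is given,
 * the bio in the per-temperature DATA bio lists that contains @page; then
 * submit it and release the caller's reference.
 */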
787void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
788 struct bio **bio, struct page *page)
789{
790 enum temp_type temp;
791 bool found = false;
792 struct bio *target = bio ? *bio : NULL;
793
794 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
795 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
796 struct list_head *head = &io->bio_list;
797 struct bio_entry *be;
798
799 if (list_empty(head))
800 continue;
801
802 down_read(&io->bio_list_lock);
803 list_for_each_entry(be, head, list) {
804 if (target)
805 found = (target == be->bio);
806 else
807 found = __has_merged_page(be->bio, NULL,
808 page, 0);
809 if (found)
810 break;
811 }
812 up_read(&io->bio_list_lock);
813
814 if (!found)
815 continue;
816
817 found = false;
818
819 down_write(&io->bio_list_lock);
820 list_for_each_entry(be, head, list) {
821 if (target)
822 found = (target == be->bio);
823 else
824 found = __has_merged_page(be->bio, NULL,
825 page, 0);
826 if (found) {
827 target = be->bio;
828 del_bio_entry(be);
829 break;
830 }
831 }
832 up_write(&io->bio_list_lock);
833 }
834
835 if (found)
836 __submit_bio(sbi, target, DATA);
837 if (bio && *bio) {
838 bio_put(*bio);
839 *bio = NULL;
840 }
841}
842
843int f2fs_merge_page_bio(struct f2fs_io_info *fio)
844{
845 struct bio *bio = *fio->bio;
846 struct page *page = fio->encrypted_page ?
847 fio->encrypted_page : fio->page;
848
849 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
850 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
851 return -EFSCORRUPTED;
852
853 trace_f2fs_submit_page_bio(page, fio);
854
855 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
856 fio->new_blkaddr))
857 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
858alloc_new:
859 if (!bio) {
860 bio = __bio_alloc(fio, BIO_MAX_VECS);
861 __attach_io_flag(fio);
862 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
863 fio->page->index, fio, GFP_NOIO);
864 bio_set_op_attrs(bio, fio->op, fio->op_flags);
865
866 add_bio_entry(fio->sbi, bio, page, fio->temp);
867 } else {
868 if (add_ipu_page(fio, &bio, page))
869 goto alloc_new;
870 }
871
872 if (fio->io_wbc)
873 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
874
875 inc_page_count(fio->sbi, WB_DATA_TYPE(page));
876
877 *fio->last_block = fio->new_blkaddr;
878 *fio->bio = bio;
879
880 return 0;
881}
882
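/*
 * Add one page to the per-log write bio for fio->type/fio->temp, merging with
 * the existing bio when the block address, op flags and crypto context allow,
 * and submitting the old bio first when they do not.
 */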
883void f2fs_submit_page_write(struct f2fs_io_info *fio)
884{
885 struct f2fs_sb_info *sbi = fio->sbi;
886 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
887 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
888 struct page *bio_page;
889
890 f2fs_bug_on(sbi, is_read_io(fio->op));
891
892 down_write(&io->io_rwsem);
893next:
894 if (fio->in_list) {
895 spin_lock(&io->io_lock);
896 if (list_empty(&io->io_list)) {
897 spin_unlock(&io->io_lock);
898 goto out;
899 }
900 fio = list_first_entry(&io->io_list,
901 struct f2fs_io_info, list);
902 list_del(&fio->list);
903 spin_unlock(&io->io_lock);
904 }
905
906 verify_fio_blkaddr(fio);
907
908 if (fio->encrypted_page)
909 bio_page = fio->encrypted_page;
910 else if (fio->compressed_page)
911 bio_page = fio->compressed_page;
912 else
913 bio_page = fio->page;

	/* set submitted = true as a return value */
916 fio->submitted = true;
917
918 inc_page_count(sbi, WB_DATA_TYPE(bio_page));
919
920 if (io->bio &&
921 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
922 fio->new_blkaddr) ||
923 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
924 bio_page->index, fio)))
925 __submit_merged_bio(io);
926alloc_new:
927 if (io->bio == NULL) {
928 if (F2FS_IO_ALIGNED(sbi) &&
929 (fio->type == DATA || fio->type == NODE) &&
930 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
931 dec_page_count(sbi, WB_DATA_TYPE(bio_page));
932 fio->retry = true;
933 goto skip;
934 }
935 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
936 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
937 bio_page->index, fio, GFP_NOIO);
938 io->fio = *fio;
939 }
940
941 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
942 __submit_merged_bio(io);
943 goto alloc_new;
944 }
945
946 if (fio->io_wbc)
947 wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
948
949 io->last_block_in_bio = fio->new_blkaddr;
950
951 trace_f2fs_submit_page_write(fio->page, fio);
952skip:
953 if (fio->in_list)
954 goto next;
955out:
956 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
957 !f2fs_is_checkpoint_ready(sbi))
958 __submit_merged_bio(io);
959 up_write(&io->io_rwsem);
960}
961
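/*
 * Allocate a read bio targeted at @blkaddr, attaching a bio_post_read_ctx
 * when decryption, fs-verity or decompression work may be needed once the
 * read completes.
 */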
962static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
963 unsigned nr_pages, unsigned op_flag,
964 pgoff_t first_idx, bool for_write)
965{
966 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
967 struct bio *bio;
968 struct bio_post_read_ctx *ctx;
969 unsigned int post_read_steps = 0;
970
971 bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
972 bio_max_segs(nr_pages), &f2fs_bioset);
973 if (!bio)
974 return ERR_PTR(-ENOMEM);
975
976 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
977
978 f2fs_target_device(sbi, blkaddr, bio);
979 bio->bi_end_io = f2fs_read_end_io;
980 bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
981
982 if (fscrypt_inode_uses_fs_layer_crypto(inode))
983 post_read_steps |= STEP_DECRYPT;
984
985 if (f2fs_need_verity(inode, first_idx))
986 post_read_steps |= STEP_VERITY;
987
	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters.  We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */
994
995 if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
997 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
998 ctx->bio = bio;
999 ctx->sbi = sbi;
1000 ctx->enabled_steps = post_read_steps;
1001 bio->bi_private = ctx;
1002 }
1003
1004 return bio;
1005}
1006
/* Submit a single-page read, with post-read processing set up as needed */
1008static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1009 block_t blkaddr, int op_flags, bool for_write)
1010{
1011 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1012 struct bio *bio;
1013
1014 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1015 page->index, for_write);
1016 if (IS_ERR(bio))
1017 return PTR_ERR(bio);
1018
	/* wait for GCed page writeback via META_MAPPING */
1020 f2fs_wait_on_block_writeback(inode, blkaddr);
1021
1022 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1023 bio_put(bio);
1024 return -EFAULT;
1025 }
1026 ClearPageError(page);
1027 inc_page_count(sbi, F2FS_RD_DATA);
1028 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1029 __submit_bio(sbi, bio, DATA);
1030 return 0;
1031}
1032
1033static void __set_data_blkaddr(struct dnode_of_data *dn)
1034{
1035 struct f2fs_node *rn = F2FS_NODE(dn->node_page);
1036 __le32 *addr_array;
1037 int base = 0;
1038
1039 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
1040 base = get_extra_isize(dn->inode);
1041
	/* Get physical address of data block */
1043 addr_array = blkaddr_in_node(rn);
1044 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
1045}
1046
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
1053void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
1054{
1055 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1056 __set_data_blkaddr(dn);
1057 if (set_page_dirty(dn->node_page))
1058 dn->node_changed = true;
1059}
1060
1061void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1062{
1063 dn->data_blkaddr = blkaddr;
1064 f2fs_set_data_blkaddr(dn);
1065 f2fs_update_extent_cache(dn);
1066}
1067
/* Reserve @count holes from dn->ofs_in_node onward by marking them NEW_ADDR */
1069int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1070{
1071 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1072 int err;
1073
1074 if (!count)
1075 return 0;
1076
1077 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1078 return -EPERM;
1079 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1080 return err;
1081
1082 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1083 dn->ofs_in_node, count);
1084
1085 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1086
1087 for (; count > 0; dn->ofs_in_node++) {
1088 block_t blkaddr = f2fs_data_blkaddr(dn);
1089
1090 if (blkaddr == NULL_ADDR) {
1091 dn->data_blkaddr = NEW_ADDR;
1092 __set_data_blkaddr(dn);
1093 count--;
1094 }
1095 }
1096
1097 if (set_page_dirty(dn->node_page))
1098 dn->node_changed = true;
1099 return 0;
1100}
1101
/* Should keep dn->ofs_in_node unchanged */
1103int f2fs_reserve_new_block(struct dnode_of_data *dn)
1104{
1105 unsigned int ofs_in_node = dn->ofs_in_node;
1106 int ret;
1107
1108 ret = f2fs_reserve_new_blocks(dn, 1);
1109 dn->ofs_in_node = ofs_in_node;
1110 return ret;
1111}
1112
1113int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1114{
1115 bool need_put = dn->inode_page ? false : true;
1116 int err;
1117
1118 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1119 if (err)
1120 return err;
1121
1122 if (dn->data_blkaddr == NULL_ADDR)
1123 err = f2fs_reserve_new_block(dn);
1124 if (err || need_put)
1125 f2fs_put_dnode(dn);
1126 return err;
1127}
1128
1129int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
1130{
1131 struct extent_info ei = {0, 0, 0};
1132 struct inode *inode = dn->inode;
1133
1134 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1135 dn->data_blkaddr = ei.blk + index - ei.fofs;
1136 return 0;
1137 }
1138
1139 return f2fs_reserve_block(dn, index);
1140}
1141
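/*
 * Look up the block address for @index (extent cache first, then the dnode)
 * and return its pagecache page: already-uptodate and NEW_ADDR pages come
 * back unlocked, otherwise a read bio is submitted and the locked page is
 * returned with IO in flight.
 */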
1142struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1143 int op_flags, bool for_write)
1144{
1145 struct address_space *mapping = inode->i_mapping;
1146 struct dnode_of_data dn;
1147 struct page *page;
1148 struct extent_info ei = {0,0,0};
1149 int err;
1150
1151 page = f2fs_grab_cache_page(mapping, index, for_write);
1152 if (!page)
1153 return ERR_PTR(-ENOMEM);
1154
1155 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1156 dn.data_blkaddr = ei.blk + index - ei.fofs;
1157 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1158 DATA_GENERIC_ENHANCE_READ)) {
1159 err = -EFSCORRUPTED;
1160 goto put_err;
1161 }
1162 goto got_it;
1163 }
1164
1165 set_new_dnode(&dn, inode, NULL, NULL, 0);
1166 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1167 if (err)
1168 goto put_err;
1169 f2fs_put_dnode(&dn);
1170
1171 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1172 err = -ENOENT;
1173 goto put_err;
1174 }
1175 if (dn.data_blkaddr != NEW_ADDR &&
1176 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1177 dn.data_blkaddr,
1178 DATA_GENERIC_ENHANCE)) {
1179 err = -EFSCORRUPTED;
1180 goto put_err;
1181 }
1182got_it:
1183 if (PageUptodate(page)) {
1184 unlock_page(page);
1185 return page;
1186 }
1187
	/*
	 * A new dentry page was allocated but could not be written, because
	 * its inode page could not be allocated due to -ENOSPC.
	 * In that case its blkaddr stays NEW_ADDR; see f2fs_add_link ->
	 * f2fs_get_new_data_page -> f2fs_init_inode_metadata.
	 */
1195 if (dn.data_blkaddr == NEW_ADDR) {
1196 zero_user_segment(page, 0, PAGE_SIZE);
1197 if (!PageUptodate(page))
1198 SetPageUptodate(page);
1199 unlock_page(page);
1200 return page;
1201 }
1202
1203 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1204 op_flags, for_write);
1205 if (err)
1206 goto put_err;
1207 return page;
1208
1209put_err:
1210 f2fs_put_page(page, 1);
1211 return ERR_PTR(err);
1212}
1213
1214struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
1215{
1216 struct address_space *mapping = inode->i_mapping;
1217 struct page *page;
1218
1219 page = find_get_page(mapping, index);
1220 if (page && PageUptodate(page))
1221 return page;
1222 f2fs_put_page(page, 0);
1223
1224 page = f2fs_get_read_data_page(inode, index, 0, false);
1225 if (IS_ERR(page))
1226 return page;
1227
1228 if (PageUptodate(page))
1229 return page;
1230
1231 wait_on_page_locked(page);
1232 if (unlikely(!PageUptodate(page))) {
1233 f2fs_put_page(page, 0);
1234 return ERR_PTR(-EIO);
1235 }
1236 return page;
1237}
1238
/*
 * If this tries to access a hole, return an error: the callers (functions in
 * dir.c and GC) need to know whether the page exists or not.
 */
1244struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1245 bool for_write)
1246{
1247 struct address_space *mapping = inode->i_mapping;
1248 struct page *page;
1249repeat:
1250 page = f2fs_get_read_data_page(inode, index, 0, for_write);
1251 if (IS_ERR(page))
1252 return page;
1253
	/* wait for read completion */
1255 lock_page(page);
1256 if (unlikely(page->mapping != mapping)) {
1257 f2fs_put_page(page, 1);
1258 goto repeat;
1259 }
1260 if (unlikely(!PageUptodate(page))) {
1261 f2fs_put_page(page, 1);
1262 return ERR_PTR(-EIO);
1263 }
1264 return page;
1265}
1266
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage is released by this function.
 */
1276struct page *f2fs_get_new_data_page(struct inode *inode,
1277 struct page *ipage, pgoff_t index, bool new_i_size)
1278{
1279 struct address_space *mapping = inode->i_mapping;
1280 struct page *page;
1281 struct dnode_of_data dn;
1282 int err;
1283
1284 page = f2fs_grab_cache_page(mapping, index, true);
1285 if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
1290 f2fs_put_page(ipage, 1);
1291 return ERR_PTR(-ENOMEM);
1292 }
1293
1294 set_new_dnode(&dn, inode, ipage, NULL, 0);
1295 err = f2fs_reserve_block(&dn, index);
1296 if (err) {
1297 f2fs_put_page(page, 1);
1298 return ERR_PTR(err);
1299 }
1300 if (!ipage)
1301 f2fs_put_dnode(&dn);
1302
1303 if (PageUptodate(page))
1304 goto got_it;
1305
1306 if (dn.data_blkaddr == NEW_ADDR) {
1307 zero_user_segment(page, 0, PAGE_SIZE);
1308 if (!PageUptodate(page))
1309 SetPageUptodate(page);
1310 } else {
1311 f2fs_put_page(page, 1);
1312
		/* if ipage exists, blkaddr should be NEW_ADDR */
1314 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1315 page = f2fs_get_lock_data_page(inode, index, true);
1316 if (IS_ERR(page))
1317 return page;
1318 }
1319got_it:
1320 if (new_i_size && i_size_read(inode) <
1321 ((loff_t)(index + 1) << PAGE_SHIFT))
1322 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1323 return page;
1324}
1325
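/*
 * Allocate an on-disk block for dn->ofs_in_node via the active log of
 * @seg_type and record the new address in the dnode and extent cache.
 */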
1326static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1327{
1328 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1329 struct f2fs_summary sum;
1330 struct node_info ni;
1331 block_t old_blkaddr;
1332 blkcnt_t count = 1;
1333 int err;
1334
1335 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1336 return -EPERM;
1337
1338 err = f2fs_get_node_info(sbi, dn->nid, &ni);
1339 if (err)
1340 return err;
1341
1342 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1343 if (dn->data_blkaddr != NULL_ADDR)
1344 goto alloc;
1345
1346 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1347 return err;
1348
1349alloc:
1350 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1351 old_blkaddr = dn->data_blkaddr;
1352 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1353 &sum, seg_type, NULL);
1354 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1355 invalidate_mapping_pages(META_MAPPING(sbi),
1356 old_blkaddr, old_blkaddr);
1357 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1358
	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from an unwritten block via dio_read.
	 */
1363 return 0;
1364}
1365
1366int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
1367{
1368 struct inode *inode = file_inode(iocb->ki_filp);
1369 struct f2fs_map_blocks map;
1370 int flag;
1371 int err = 0;
1372 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
1373
1374 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
1375 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1376 if (map.m_len > map.m_lblk)
1377 map.m_len -= map.m_lblk;
1378 else
1379 map.m_len = 0;
1380
1381 map.m_next_pgofs = NULL;
1382 map.m_next_extent = NULL;
1383 map.m_seg_type = NO_CHECK_TYPE;
1384 map.m_may_create = true;
1385
1386 if (direct_io) {
1387 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
1388 flag = f2fs_force_buffered_io(inode, iocb, from) ?
1389 F2FS_GET_BLOCK_PRE_AIO :
1390 F2FS_GET_BLOCK_PRE_DIO;
1391 goto map_blocks;
1392 }
1393 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
1394 err = f2fs_convert_inline_inode(inode);
1395 if (err)
1396 return err;
1397 }
1398 if (f2fs_has_inline_data(inode))
1399 return err;
1400
1401 flag = F2FS_GET_BLOCK_PRE_AIO;
1402
1403map_blocks:
1404 err = f2fs_map_blocks(inode, &map, 1, flag);
1405 if (map.m_len > 0 && err == -ENOSPC) {
1406 if (!direct_io)
1407 set_inode_flag(inode, FI_NO_PREALLOC);
1408 err = 0;
1409 }
1410 return err;
1411}
1412
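/*
 * Take or drop the lock that serializes block mapping: the node_change rwsem
 * for batched preallocation (F2FS_GET_BLOCK_PRE_AIO), f2fs_lock_op()
 * otherwise.
 */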
1413void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
1414{
1415 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1416 if (lock)
1417 down_read(&sbi->node_change);
1418 else
1419 up_read(&sbi->node_change);
1420 } else {
1421 if (lock)
1422 f2fs_lock_op(sbi);
1423 else
1424 f2fs_unlock_op(sbi);
1425 }
1426}
1427
/*
 * f2fs_map_blocks() tries to find or build the mapping that maps contiguous
 * logical blocks to physical blocks, and returns that information via the
 * f2fs_map_blocks structure.
 */
1433int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1434 int create, int flag)
1435{
1436 unsigned int maxblocks = map->m_len;
1437 struct dnode_of_data dn;
1438 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1439 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1440 pgoff_t pgofs, end_offset, end;
1441 int err = 0, ofs = 1;
1442 unsigned int ofs_in_node, last_ofs_in_node;
1443 blkcnt_t prealloc;
1444 struct extent_info ei = {0,0,0};
1445 block_t blkaddr;
1446 unsigned int start_pgofs;
1447
1448 if (!maxblocks)
1449 return 0;
1450
1451 map->m_len = 0;
1452 map->m_flags = 0;
1453
	/* it only supports block size == page size */
1455 pgofs = (pgoff_t)map->m_lblk;
1456 end = pgofs + maxblocks;
1457
1458 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1459 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1460 map->m_may_create)
1461 goto next_dnode;
1462
1463 map->m_pblk = ei.blk + pgofs - ei.fofs;
1464 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1465 map->m_flags = F2FS_MAP_MAPPED;
1466 if (map->m_next_extent)
1467 *map->m_next_extent = pgofs + map->m_len;
1468
		/* for DIO, wait for in-flight writeback of the mapped blocks */
1470 if (flag == F2FS_GET_BLOCK_DIO)
1471 f2fs_wait_on_block_writeback_range(inode,
1472 map->m_pblk, map->m_len);
1473 goto out;
1474 }
1475
1476next_dnode:
1477 if (map->m_may_create)
1478 f2fs_do_map_lock(sbi, flag, true);
1479
	/* when reading holes, we need its node page */
1481 set_new_dnode(&dn, inode, NULL, NULL, 0);
1482 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1483 if (err) {
1484 if (flag == F2FS_GET_BLOCK_BMAP)
1485 map->m_pblk = 0;
1486 if (err == -ENOENT) {
1487 err = 0;
1488 if (map->m_next_pgofs)
1489 *map->m_next_pgofs =
1490 f2fs_get_next_page_offset(&dn, pgofs);
1491 if (map->m_next_extent)
1492 *map->m_next_extent =
1493 f2fs_get_next_page_offset(&dn, pgofs);
1494 }
1495 goto unlock_out;
1496 }
1497
1498 start_pgofs = pgofs;
1499 prealloc = 0;
1500 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1501 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1502
1503next_block:
1504 blkaddr = f2fs_data_blkaddr(&dn);
1505
1506 if (__is_valid_data_blkaddr(blkaddr) &&
1507 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1508 err = -EFSCORRUPTED;
1509 goto sync_out;
1510 }
1511
1512 if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
1514 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1515 map->m_may_create) {
1516 err = __allocate_data_block(&dn, map->m_seg_type);
1517 if (err)
1518 goto sync_out;
1519 blkaddr = dn.data_blkaddr;
1520 set_inode_flag(inode, FI_APPEND_WRITE);
1521 }
1522 } else {
1523 if (create) {
1524 if (unlikely(f2fs_cp_error(sbi))) {
1525 err = -EIO;
1526 goto sync_out;
1527 }
1528 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1529 if (blkaddr == NULL_ADDR) {
1530 prealloc++;
1531 last_ofs_in_node = dn.ofs_in_node;
1532 }
1533 } else {
1534 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1535 flag != F2FS_GET_BLOCK_DIO);
1536 err = __allocate_data_block(&dn,
1537 map->m_seg_type);
1538 if (!err)
1539 set_inode_flag(inode, FI_APPEND_WRITE);
1540 }
1541 if (err)
1542 goto sync_out;
1543 map->m_flags |= F2FS_MAP_NEW;
1544 blkaddr = dn.data_blkaddr;
1545 } else {
1546 if (flag == F2FS_GET_BLOCK_BMAP) {
1547 map->m_pblk = 0;
1548 goto sync_out;
1549 }
1550 if (flag == F2FS_GET_BLOCK_PRECACHE)
1551 goto sync_out;
1552 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1553 blkaddr == NULL_ADDR) {
1554 if (map->m_next_pgofs)
1555 *map->m_next_pgofs = pgofs + 1;
1556 goto sync_out;
1557 }
1558 if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
1560 if (map->m_next_pgofs)
1561 *map->m_next_pgofs = pgofs + 1;
1562 goto sync_out;
1563 }
1564 }
1565 }
1566
1567 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1568 goto skip;
1569
1570 if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
1572 if (blkaddr == NEW_ADDR)
1573 map->m_flags |= F2FS_MAP_UNWRITTEN;
1574 map->m_flags |= F2FS_MAP_MAPPED;
1575
1576 map->m_pblk = blkaddr;
1577 map->m_len = 1;
1578 } else if ((map->m_pblk != NEW_ADDR &&
1579 blkaddr == (map->m_pblk + ofs)) ||
1580 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1581 flag == F2FS_GET_BLOCK_PRE_DIO) {
1582 ofs++;
1583 map->m_len++;
1584 } else {
1585 goto sync_out;
1586 }
1587
1588skip:
1589 dn.ofs_in_node++;
1590 pgofs++;
1591
	/* preallocate blocks in batch for one dnode page */
1593 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1594 (pgofs == end || dn.ofs_in_node == end_offset)) {
1595
1596 dn.ofs_in_node = ofs_in_node;
1597 err = f2fs_reserve_new_blocks(&dn, prealloc);
1598 if (err)
1599 goto sync_out;
1600
1601 map->m_len += dn.ofs_in_node - ofs_in_node;
1602 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1603 err = -ENOSPC;
1604 goto sync_out;
1605 }
1606 dn.ofs_in_node = end_offset;
1607 }
1608
1609 if (pgofs >= end)
1610 goto sync_out;
1611 else if (dn.ofs_in_node < end_offset)
1612 goto next_block;
1613
1614 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1615 if (map->m_flags & F2FS_MAP_MAPPED) {
1616 unsigned int ofs = start_pgofs - map->m_lblk;
1617
1618 f2fs_update_extent_cache_range(&dn,
1619 start_pgofs, map->m_pblk + ofs,
1620 map->m_len - ofs);
1621 }
1622 }
1623
1624 f2fs_put_dnode(&dn);
1625
1626 if (map->m_may_create) {
1627 f2fs_do_map_lock(sbi, flag, false);
1628 f2fs_balance_fs(sbi, dn.node_changed);
1629 }
1630 goto next_dnode;
1631
1632sync_out:
1633
	/* for DIO, wait for in-flight writeback of the mapped blocks */
1635 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1636 f2fs_wait_on_block_writeback_range(inode,
1637 map->m_pblk, map->m_len);
1638
1639 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1640 if (map->m_flags & F2FS_MAP_MAPPED) {
1641 unsigned int ofs = start_pgofs - map->m_lblk;
1642
1643 f2fs_update_extent_cache_range(&dn,
1644 start_pgofs, map->m_pblk + ofs,
1645 map->m_len - ofs);
1646 }
1647 if (map->m_next_extent)
1648 *map->m_next_extent = pgofs + 1;
1649 }
1650 f2fs_put_dnode(&dn);
1651unlock_out:
1652 if (map->m_may_create) {
1653 f2fs_do_map_lock(sbi, flag, false);
1654 f2fs_balance_fs(sbi, dn.node_changed);
1655 }
1656out:
1657 trace_f2fs_map_blocks(inode, map, err);
1658 return err;
1659}
1660
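/*
 * Return true if every block in [pos, pos + len) is already mapped, i.e. the
 * write only overwrites existing data and needs no new allocation.
 */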
1661bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1662{
1663 struct f2fs_map_blocks map;
1664 block_t last_lblk;
1665 int err;
1666
1667 if (pos + len > i_size_read(inode))
1668 return false;
1669
1670 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1671 map.m_next_pgofs = NULL;
1672 map.m_next_extent = NULL;
1673 map.m_seg_type = NO_CHECK_TYPE;
1674 map.m_may_create = false;
1675 last_lblk = F2FS_BLK_ALIGN(pos + len);
1676
1677 while (map.m_lblk < last_lblk) {
1678 map.m_len = last_lblk - map.m_lblk;
1679 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1680 if (err || map.m_len == 0)
1681 return false;
1682 map.m_lblk += map.m_len;
1683 }
1684 return true;
1685}
1686
1687static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1688{
1689 return (bytes >> inode->i_blkbits);
1690}
1691
1692static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1693{
1694 return (blks << inode->i_blkbits);
1695}
1696
1697static int __get_data_block(struct inode *inode, sector_t iblock,
1698 struct buffer_head *bh, int create, int flag,
1699 pgoff_t *next_pgofs, int seg_type, bool may_write)
1700{
1701 struct f2fs_map_blocks map;
1702 int err;
1703
1704 map.m_lblk = iblock;
1705 map.m_len = bytes_to_blks(inode, bh->b_size);
1706 map.m_next_pgofs = next_pgofs;
1707 map.m_next_extent = NULL;
1708 map.m_seg_type = seg_type;
1709 map.m_may_create = may_write;
1710
1711 err = f2fs_map_blocks(inode, &map, create, flag);
1712 if (!err) {
1713 map_bh(bh, inode->i_sb, map.m_pblk);
1714 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1715 bh->b_size = blks_to_bytes(inode, map.m_len);
1716 }
1717 return err;
1718}
1719
1720static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1721 struct buffer_head *bh_result, int create)
1722{
1723 return __get_data_block(inode, iblock, bh_result, create,
1724 F2FS_GET_BLOCK_DIO, NULL,
1725 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1726 true);
1727}
1728
1729static int get_data_block_dio(struct inode *inode, sector_t iblock,
1730 struct buffer_head *bh_result, int create)
1731{
1732 return __get_data_block(inode, iblock, bh_result, create,
1733 F2FS_GET_BLOCK_DIO, NULL,
1734 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1735 false);
1736}
1737
1738static int f2fs_xattr_fiemap(struct inode *inode,
1739 struct fiemap_extent_info *fieinfo)
1740{
1741 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1742 struct page *page;
1743 struct node_info ni;
1744 __u64 phys = 0, len;
1745 __u32 flags;
1746 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1747 int err = 0;
1748
1749 if (f2fs_has_inline_xattr(inode)) {
1750 int offset;
1751
1752 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1753 inode->i_ino, false);
1754 if (!page)
1755 return -ENOMEM;
1756
1757 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1758 if (err) {
1759 f2fs_put_page(page, 1);
1760 return err;
1761 }
1762
1763 phys = blks_to_bytes(inode, ni.blk_addr);
1764 offset = offsetof(struct f2fs_inode, i_addr) +
1765 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1766 get_inline_xattr_addrs(inode));
1767
1768 phys += offset;
1769 len = inline_xattr_size(inode);
1770
1771 f2fs_put_page(page, 1);
1772
1773 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1774
1775 if (!xnid)
1776 flags |= FIEMAP_EXTENT_LAST;
1777
1778 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1779 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1780 if (err || err == 1)
1781 return err;
1782 }
1783
1784 if (xnid) {
1785 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1786 if (!page)
1787 return -ENOMEM;
1788
1789 err = f2fs_get_node_info(sbi, xnid, &ni);
1790 if (err) {
1791 f2fs_put_page(page, 1);
1792 return err;
1793 }
1794
1795 phys = blks_to_bytes(inode, ni.blk_addr);
1796 len = inode->i_sb->s_blocksize;
1797
1798 f2fs_put_page(page, 1);
1799
1800 flags = FIEMAP_EXTENT_LAST;
1801 }
1802
1803 if (phys) {
1804 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1805 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1806 }
1807
1808 return (err < 0 ? err : 0);
1809}
1810
1811static loff_t max_inode_blocks(struct inode *inode)
1812{
1813 loff_t result = ADDRS_PER_INODE(inode);
1814 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1815
	/* two direct node blocks */
1817 result += (leaf_count * 2);
1818
	/* two indirect node blocks */
1820 leaf_count *= NIDS_PER_BLOCK;
1821 result += (leaf_count * 2);
1822
	/* one double indirect node block */
1824 leaf_count *= NIDS_PER_BLOCK;
1825 result += leaf_count;
1826
1827 return result;
1828}
1829
1830int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1831 u64 start, u64 len)
1832{
1833 struct f2fs_map_blocks map;
1834 sector_t start_blk, last_blk;
1835 pgoff_t next_pgofs;
1836 u64 logical = 0, phys = 0, size = 0;
1837 u32 flags = 0;
1838 int ret = 0;
1839 bool compr_cluster = false;
1840 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1841 loff_t maxbytes;
1842
1843 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1844 ret = f2fs_precache_extents(inode);
1845 if (ret)
1846 return ret;
1847 }
1848
1849 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1850 if (ret)
1851 return ret;
1852
1853 inode_lock(inode);
1854
1855 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1856 if (start > maxbytes) {
1857 ret = -EFBIG;
1858 goto out;
1859 }
1860
1861 if (len > maxbytes || (maxbytes - len) < start)
1862 len = maxbytes - start;
1863
1864 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1865 ret = f2fs_xattr_fiemap(inode, fieinfo);
1866 goto out;
1867 }
1868
1869 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1870 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1871 if (ret != -EAGAIN)
1872 goto out;
1873 }
1874
1875 if (bytes_to_blks(inode, len) == 0)
1876 len = blks_to_bytes(inode, 1);
1877
1878 start_blk = bytes_to_blks(inode, start);
1879 last_blk = bytes_to_blks(inode, start + len - 1);
1880
1881next:
1882 memset(&map, 0, sizeof(map));
1883 map.m_lblk = start_blk;
1884 map.m_len = bytes_to_blks(inode, len);
1885 map.m_next_pgofs = &next_pgofs;
1886 map.m_seg_type = NO_CHECK_TYPE;
1887
1888 if (compr_cluster)
1889 map.m_len = cluster_size - 1;
1890
1891 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
1892 if (ret)
1893 goto out;
1894
	/* HOLE */
1896 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1897 start_blk = next_pgofs;
1898
1899 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1900 max_inode_blocks(inode)))
1901 goto prep_next;
		/* a hole beyond i_size means there are no more extents */
1903 flags |= FIEMAP_EXTENT_LAST;
1904 }
1905
1906 if (size) {
1907 flags |= FIEMAP_EXTENT_MERGED;
1908 if (IS_ENCRYPTED(inode))
1909 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1910
1911 ret = fiemap_fill_next_extent(fieinfo, logical,
1912 phys, size, flags);
1913 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1914 if (ret)
1915 goto out;
1916 size = 0;
1917 }
1918
1919 if (start_blk > last_blk)
1920 goto out;
1921
1922 if (compr_cluster) {
1923 compr_cluster = false;
1924
1925
1926 logical = blks_to_bytes(inode, start_blk - 1);
1927 phys = blks_to_bytes(inode, map.m_pblk);
1928 size = blks_to_bytes(inode, cluster_size);
1929
1930 flags |= FIEMAP_EXTENT_ENCODED;
1931
1932 start_blk += cluster_size - 1;
1933
1934 if (start_blk > last_blk)
1935 goto out;
1936
1937 goto prep_next;
1938 }
1939
1940 if (map.m_pblk == COMPRESS_ADDR) {
1941 compr_cluster = true;
1942 start_blk++;
1943 goto prep_next;
1944 }
1945
1946 logical = blks_to_bytes(inode, start_blk);
1947 phys = blks_to_bytes(inode, map.m_pblk);
1948 size = blks_to_bytes(inode, map.m_len);
1949 flags = 0;
1950 if (map.m_flags & F2FS_MAP_UNWRITTEN)
1951 flags = FIEMAP_EXTENT_UNWRITTEN;
1952
1953 start_blk += bytes_to_blks(inode, size);
1954
1955prep_next:
1956 cond_resched();
1957 if (fatal_signal_pending(current))
1958 ret = -EINTR;
1959 else
1960 goto next;
1961out:
1962 if (ret == 1)
1963 ret = 0;
1964
1965 inode_unlock(inode);
1966 return ret;
1967}
1968
1969static inline loff_t f2fs_readpage_limit(struct inode *inode)
1970{
1971 if (IS_ENABLED(CONFIG_FS_VERITY) &&
1972 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
1973 return inode->i_sb->s_maxbytes;
1974
1975 return i_size_read(inode);
1976}
1977
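/*
 * Read one page of a regular (non-compressed) file: reuse the previous
 * mapping in *map when possible, zero pages beyond EOF, and merge the block
 * into *bio_ret, submitting and reallocating the bio when it cannot be
 * merged.
 */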
1978static int f2fs_read_single_page(struct inode *inode, struct page *page,
1979 unsigned nr_pages,
1980 struct f2fs_map_blocks *map,
1981 struct bio **bio_ret,
1982 sector_t *last_block_in_bio,
1983 bool is_readahead)
1984{
1985 struct bio *bio = *bio_ret;
1986 const unsigned blocksize = blks_to_bytes(inode, 1);
1987 sector_t block_in_file;
1988 sector_t last_block;
1989 sector_t last_block_in_file;
1990 sector_t block_nr;
1991 int ret = 0;
1992
1993 block_in_file = (sector_t)page_index(page);
1994 last_block = block_in_file + nr_pages;
1995 last_block_in_file = bytes_to_blks(inode,
1996 f2fs_readpage_limit(inode) + blocksize - 1);
1997 if (last_block > last_block_in_file)
1998 last_block = last_block_in_file;
1999
	/* just zero out pages beyond EOF */
2001 if (block_in_file >= last_block)
2002 goto zero_out;
2003
	/* Map blocks using the previous result first. */
2006 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2007 block_in_file > map->m_lblk &&
2008 block_in_file < (map->m_lblk + map->m_len))
2009 goto got_it;
2010
	/*
	 * Then do more f2fs_map_blocks() calls until we are done with this
	 * page.
	 */
2015 map->m_lblk = block_in_file;
2016 map->m_len = last_block - block_in_file;
2017
2018 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2019 if (ret)
2020 goto out;
2021got_it:
2022 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2023 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2024 SetPageMappedToDisk(page);
2025
2026 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2027 !cleancache_get_page(page))) {
2028 SetPageUptodate(page);
2029 goto confused;
2030 }
2031
2032 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2033 DATA_GENERIC_ENHANCE_READ)) {
2034 ret = -EFSCORRUPTED;
2035 goto out;
2036 }
2037 } else {
2038zero_out:
2039 zero_user_segment(page, 0, PAGE_SIZE);
2040 if (f2fs_need_verity(inode, page->index) &&
2041 !fsverity_verify_page(page)) {
2042 ret = -EIO;
2043 goto out;
2044 }
2045 if (!PageUptodate(page))
2046 SetPageUptodate(page);
2047 unlock_page(page);
2048 goto out;
2049 }
2050
	/*
	 * This page will go to BIO.  Do we need to send this
	 * BIO off first?
	 */
2055 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2056 *last_block_in_bio, block_nr) ||
2057 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2058submit_and_realloc:
2059 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2060 bio = NULL;
2061 }
2062 if (bio == NULL) {
2063 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2064 is_readahead ? REQ_RAHEAD : 0, page->index,
2065 false);
2066 if (IS_ERR(bio)) {
2067 ret = PTR_ERR(bio);
2068 bio = NULL;
2069 goto out;
2070 }
2071 }
2072
	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
2077 f2fs_wait_on_block_writeback(inode, block_nr);
2078
2079 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2080 goto submit_and_realloc;
2081
2082 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2083 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2084 ClearPageError(page);
2085 *last_block_in_bio = block_nr;
2086 goto out;
2087confused:
2088 if (bio) {
2089 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2090 bio = NULL;
2091 }
2092 unlock_page(page);
2093out:
2094 *bio_ret = bio;
2095 return ret;
2096}
2097
2098#ifdef CONFIG_F2FS_FS_COMPRESSION
2099int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2100 unsigned nr_pages, sector_t *last_block_in_bio,
2101 bool is_readahead, bool for_write)
2102{
2103 struct dnode_of_data dn;
2104 struct inode *inode = cc->inode;
2105 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2106 struct bio *bio = *bio_ret;
2107 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2108 sector_t last_block_in_file;
2109 const unsigned blocksize = blks_to_bytes(inode, 1);
2110 struct decompress_io_ctx *dic = NULL;
2111 int i;
2112 int ret = 0;
2113
2114 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2115
2116 last_block_in_file = bytes_to_blks(inode,
2117 f2fs_readpage_limit(inode) + blocksize - 1);
2118
	/* get rid of pages beyond EOF */
2120 for (i = 0; i < cc->cluster_size; i++) {
2121 struct page *page = cc->rpages[i];
2122
2123 if (!page)
2124 continue;
2125 if ((sector_t)page->index >= last_block_in_file) {
2126 zero_user_segment(page, 0, PAGE_SIZE);
2127 if (!PageUptodate(page))
2128 SetPageUptodate(page);
2129 } else if (!PageUptodate(page)) {
2130 continue;
2131 }
2132 unlock_page(page);
2133 cc->rpages[i] = NULL;
2134 cc->nr_rpages--;
2135 }
2136
	/* we are done since all pages are beyond EOF */
2138 if (f2fs_cluster_is_empty(cc))
2139 goto out;
2140
2141 set_new_dnode(&dn, inode, NULL, NULL, 0);
2142 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2143 if (ret)
2144 goto out;
2145
2146 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2147
2148 for (i = 1; i < cc->cluster_size; i++) {
2149 block_t blkaddr;
2150
2151 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2152 dn.ofs_in_node + i);
2153
2154 if (!__is_valid_data_blkaddr(blkaddr))
2155 break;
2156
2157 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2158 ret = -EFAULT;
2159 goto out_put_dnode;
2160 }
2161 cc->nr_cpages++;
2162 }
2163
	/* nothing to decompress */
2165 if (cc->nr_cpages == 0) {
2166 ret = 0;
2167 goto out_put_dnode;
2168 }
2169
2170 dic = f2fs_alloc_dic(cc);
2171 if (IS_ERR(dic)) {
2172 ret = PTR_ERR(dic);
2173 goto out_put_dnode;
2174 }
2175
2176 for (i = 0; i < dic->nr_cpages; i++) {
2177 struct page *page = dic->cpages[i];
2178 block_t blkaddr;
2179 struct bio_post_read_ctx *ctx;
2180
2181 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2182 dn.ofs_in_node + i + 1);
2183
2184 if (bio && (!page_is_mergeable(sbi, bio,
2185 *last_block_in_bio, blkaddr) ||
2186 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2187submit_and_realloc:
2188 __submit_bio(sbi, bio, DATA);
2189 bio = NULL;
2190 }
2191
2192 if (!bio) {
2193 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2194 is_readahead ? REQ_RAHEAD : 0,
2195 page->index, for_write);
2196 if (IS_ERR(bio)) {
2197 ret = PTR_ERR(bio);
2198 f2fs_decompress_end_io(dic, ret);
2199 f2fs_put_dnode(&dn);
2200 *bio_ret = NULL;
2201 return ret;
2202 }
2203 }
2204
2205 f2fs_wait_on_block_writeback(inode, blkaddr);
2206
2207 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2208 goto submit_and_realloc;
2209
2210 ctx = bio->bi_private;
2211 ctx->enabled_steps |= STEP_DECOMPRESS;
2212 refcount_inc(&dic->refcnt);
2213
2214 inc_page_count(sbi, F2FS_RD_DATA);
2215 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2216 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2217 ClearPageError(page);
2218 *last_block_in_bio = blkaddr;
2219 }
2220
2221 f2fs_put_dnode(&dn);
2222
2223 *bio_ret = bio;
2224 return 0;
2225
2226out_put_dnode:
2227 f2fs_put_dnode(&dn);
2228out:
2229 for (i = 0; i < cc->cluster_size; i++) {
2230 if (cc->rpages[i]) {
2231 ClearPageUptodate(cc->rpages[i]);
2232 ClearPageError(cc->rpages[i]);
2233 unlock_page(cc->rpages[i]);
2234 }
2235 }
2236 *bio_ret = bio;
2237 return ret;
2238}
2239#endif
2240
/*
 * This function was originally taken from fs/mpage.c, and customized for
 * f2fs.  The major change is that f2fs uses block_size == page_size by
 * default.
 */
2245static int f2fs_mpage_readpages(struct inode *inode,
2246 struct readahead_control *rac, struct page *page)
2247{
2248 struct bio *bio = NULL;
2249 sector_t last_block_in_bio = 0;
2250 struct f2fs_map_blocks map;
2251#ifdef CONFIG_F2FS_FS_COMPRESSION
2252 struct compress_ctx cc = {
2253 .inode = inode,
2254 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2255 .cluster_size = F2FS_I(inode)->i_cluster_size,
2256 .cluster_idx = NULL_CLUSTER,
2257 .rpages = NULL,
2258 .cpages = NULL,
2259 .nr_rpages = 0,
2260 .nr_cpages = 0,
2261 };
2262#endif
2263 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2264 unsigned max_nr_pages = nr_pages;
2265 int ret = 0;
2266
2267 map.m_pblk = 0;
2268 map.m_lblk = 0;
2269 map.m_len = 0;
2270 map.m_flags = 0;
2271 map.m_next_pgofs = NULL;
2272 map.m_next_extent = NULL;
2273 map.m_seg_type = NO_CHECK_TYPE;
2274 map.m_may_create = false;
2275
2276 for (; nr_pages; nr_pages--) {
2277 if (rac) {
2278 page = readahead_page(rac);
2279 prefetchw(&page->flags);
2280 }
2281
2282#ifdef CONFIG_F2FS_FS_COMPRESSION
2283 if (f2fs_compressed_file(inode)) {
			/* submit pages collected for the previous cluster first */
2285 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2286 ret = f2fs_read_multi_pages(&cc, &bio,
2287 max_nr_pages,
2288 &last_block_in_bio,
2289 rac != NULL, false);
2290 f2fs_destroy_compress_ctx(&cc, false);
2291 if (ret)
2292 goto set_error_page;
2293 }
2294 ret = f2fs_is_compressed_cluster(inode, page->index);
2295 if (ret < 0)
2296 goto set_error_page;
2297 else if (!ret)
2298 goto read_single_page;
2299
2300 ret = f2fs_init_compress_ctx(&cc);
2301 if (ret)
2302 goto set_error_page;
2303
2304 f2fs_compress_ctx_add_page(&cc, page);
2305
2306 goto next_page;
2307 }
2308read_single_page:
2309#endif
2310
2311 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2312 &bio, &last_block_in_bio, rac);
2313 if (ret) {
2314#ifdef CONFIG_F2FS_FS_COMPRESSION
2315set_error_page:
2316#endif
2317 SetPageError(page);
2318 zero_user_segment(page, 0, PAGE_SIZE);
2319 unlock_page(page);
2320 }
2321#ifdef CONFIG_F2FS_FS_COMPRESSION
2322next_page:
2323#endif
2324 if (rac)
2325 put_page(page);
2326
2327#ifdef CONFIG_F2FS_FS_COMPRESSION
2328 if (f2fs_compressed_file(inode)) {
2329
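	/* last page of the batch: read out any partially collected cluster */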
2330 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2331 ret = f2fs_read_multi_pages(&cc, &bio,
2332 max_nr_pages,
2333 &last_block_in_bio,
2334 rac != NULL, false);
2335 f2fs_destroy_compress_ctx(&cc, false);
2336 }
2337 }
2338#endif
2339 }
2340 if (bio)
2341 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2342 return ret;
2343}
2344
2345static int f2fs_read_data_page(struct file *file, struct page *page)
2346{
2347 struct inode *inode = page_file_mapping(page)->host;
2348 int ret = -EAGAIN;
2349
2350 trace_f2fs_readpage(page, DATA);
2351
2352 if (!f2fs_is_compress_backend_ready(inode)) {
2353 unlock_page(page);
2354 return -EOPNOTSUPP;
2355 }
2356
2357
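	/* inline data is copied from the inode page; -EAGAIN falls back to block mapping */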
2358 if (f2fs_has_inline_data(inode))
2359 ret = f2fs_read_inline_data(inode, page);
2360 if (ret == -EAGAIN)
2361 ret = f2fs_mpage_readpages(inode, NULL, page);
2362 return ret;
2363}
2364
2365static void f2fs_readahead(struct readahead_control *rac)
2366{
2367 struct inode *inode = rac->mapping->host;
2368
2369 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2370
2371 if (!f2fs_is_compress_backend_ready(inode))
2372 return;
2373
2374
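	/* inline-data files are served by ->readpage; there is nothing to read ahead */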
2375 if (f2fs_has_inline_data(inode))
2376 return;
2377
2378 f2fs_mpage_readpages(inode, rac, NULL);
2379}
2380
2381int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2382{
2383 struct inode *inode = fio->page->mapping->host;
2384 struct page *mpage, *page;
2385 gfp_t gfp_flags = GFP_NOFS;
2386
2387 if (!f2fs_encrypted_file(inode))
2388 return 0;
2389
2390 page = fio->compressed_page ? fio->compressed_page : fio->page;
2391
2392
2393 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2394
2395 if (fscrypt_inode_uses_inline_crypto(inode))
2396 return 0;
2397
2398retry_encrypt:
2399 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2400 PAGE_SIZE, 0, gfp_flags);
2401 if (IS_ERR(fio->encrypted_page)) {
2402
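	/* on -ENOMEM, flush merged writes, wait a while, then retry with __GFP_NOFAIL */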
2403 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2404 f2fs_flush_merged_writes(fio->sbi);
2405 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2406 gfp_flags |= __GFP_NOFAIL;
2407 goto retry_encrypt;
2408 }
2409 return PTR_ERR(fio->encrypted_page);
2410 }
2411
2412 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2413 if (mpage) {
2414 if (PageUptodate(mpage))
2415 memcpy(page_address(mpage),
2416 page_address(fio->encrypted_page), PAGE_SIZE);
2417 f2fs_put_page(mpage, 1);
2418 }
2419 return 0;
2420}
2421
2422static inline bool check_inplace_update_policy(struct inode *inode,
2423 struct f2fs_io_info *fio)
2424{
2425 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2426 unsigned int policy = SM_I(sbi)->ipu_policy;
2427
2428 if (policy & (0x1 << F2FS_IPU_FORCE))
2429 return true;
2430 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2431 return true;
2432 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2433 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2434 return true;
2435 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2436 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2437 return true;
2438
2439
2440
2441
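	/* in-place update for asynchronous, unencrypted write requests */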
2442 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2443 fio && fio->op == REQ_OP_WRITE &&
2444 !(fio->op_flags & REQ_SYNC) &&
2445 !IS_ENCRYPTED(inode))
2446 return true;
2447
2448
2449 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2450 is_inode_flag_set(inode, FI_NEED_IPU))
2451 return true;
2452
2453 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2454 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2455 return true;
2456
2457 return false;
2458}
2459
2460bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2461{
2462 if (f2fs_is_pinned_file(inode))
2463 return true;
2464
2465
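	/* overwrite cold files in place to avoid fragmentation */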
2466 if (file_is_cold(inode))
2467 return true;
2468
2469 return check_inplace_update_policy(inode, fio);
2470}
2471
2472bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2473{
2474 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2475
2476 if (f2fs_lfs_mode(sbi))
2477 return true;
2478 if (S_ISDIR(inode->i_mode))
2479 return true;
2480 if (IS_NOQUOTA(inode))
2481 return true;
2482 if (f2fs_is_atomic_file(inode))
2483 return true;
2484 if (fio) {
2485 if (is_cold_data(fio->page))
2486 return true;
2487 if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2488 return true;
2489 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2490 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2491 return true;
2492 }
2493 return false;
2494}
2495
2496static inline bool need_inplace_update(struct f2fs_io_info *fio)
2497{
2498 struct inode *inode = fio->page->mapping->host;
2499
2500 if (f2fs_should_update_outplace(inode, fio))
2501 return false;
2502
2503 return f2fs_should_update_inplace(inode, fio);
2504}
2505
2506int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2507{
2508 struct page *page = fio->page;
2509 struct inode *inode = page->mapping->host;
2510 struct dnode_of_data dn;
2511 struct extent_info ei = {0,0,0};
2512 struct node_info ni;
2513 bool ipu_force = false;
2514 int err = 0;
2515
2516 set_new_dnode(&dn, inode, NULL, NULL, 0);
2517 if (need_inplace_update(fio) &&
2518 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2519 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2520
2521 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2522 DATA_GENERIC_ENHANCE))
2523 return -EFSCORRUPTED;
2524
2525 ipu_force = true;
2526 fio->need_lock = LOCK_DONE;
2527 goto got_it;
2528 }
2529
2530
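	/* trylock to avoid a deadlock between the page lock and f2fs_lock_op() */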
2531 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2532 return -EAGAIN;
2533
2534 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2535 if (err)
2536 goto out;
2537
2538 fio->old_blkaddr = dn.data_blkaddr;
2539
2540
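	/* the page has been truncated: there is no block left to write */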
2541 if (fio->old_blkaddr == NULL_ADDR) {
2542 ClearPageUptodate(page);
2543 clear_cold_data(page);
2544 goto out_writepage;
2545 }
2546got_it:
2547 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2548 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2549 DATA_GENERIC_ENHANCE)) {
2550 err = -EFSCORRUPTED;
2551 goto out_writepage;
2552 }
2553
2554
2555
2556
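	/* in-place update: rewrite the existing block instead of allocating a new one */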
2557 if (ipu_force ||
2558 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2559 need_inplace_update(fio))) {
2560 err = f2fs_encrypt_one_page(fio);
2561 if (err)
2562 goto out_writepage;
2563
2564 set_page_writeback(page);
2565 ClearPageError(page);
2566 f2fs_put_dnode(&dn);
2567 if (fio->need_lock == LOCK_REQ)
2568 f2fs_unlock_op(fio->sbi);
2569 err = f2fs_inplace_write_data(fio);
2570 if (err) {
2571 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2572 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2573 if (PageWriteback(page))
2574 end_page_writeback(page);
2575 } else {
2576 set_inode_flag(inode, FI_UPDATE_WRITE);
2577 }
2578 trace_f2fs_do_write_data_page(fio->page, IPU);
2579 return err;
2580 }
2581
2582 if (fio->need_lock == LOCK_RETRY) {
2583 if (!f2fs_trylock_op(fio->sbi)) {
2584 err = -EAGAIN;
2585 goto out_writepage;
2586 }
2587 fio->need_lock = LOCK_REQ;
2588 }
2589
2590 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2591 if (err)
2592 goto out_writepage;
2593
2594 fio->version = ni.version;
2595
2596 err = f2fs_encrypt_one_page(fio);
2597 if (err)
2598 goto out_writepage;
2599
2600 set_page_writeback(page);
2601 ClearPageError(page);
2602
2603 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2604 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2605
2606
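	/* out-of-place update: write to a newly allocated block in LFS fashion */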
2607 f2fs_outplace_write_data(&dn, fio);
2608 trace_f2fs_do_write_data_page(page, OPU);
2609 set_inode_flag(inode, FI_APPEND_WRITE);
2610 if (page->index == 0)
2611 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2612out_writepage:
2613 f2fs_put_dnode(&dn);
2614out:
2615 if (fio->need_lock == LOCK_REQ)
2616 f2fs_unlock_op(fio->sbi);
2617 return err;
2618}
2619
2620int f2fs_write_single_data_page(struct page *page, int *submitted,
2621 struct bio **bio,
2622 sector_t *last_block,
2623 struct writeback_control *wbc,
2624 enum iostat_type io_type,
2625 int compr_blocks,
2626 bool allow_balance)
2627{
2628 struct inode *inode = page->mapping->host;
2629 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2630 loff_t i_size = i_size_read(inode);
2631 const pgoff_t end_index = ((unsigned long long)i_size)
2632 >> PAGE_SHIFT;
2633 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2634 unsigned offset = 0;
2635 bool need_balance_fs = false;
2636 int err = 0;
2637 struct f2fs_io_info fio = {
2638 .sbi = sbi,
2639 .ino = inode->i_ino,
2640 .type = DATA,
2641 .op = REQ_OP_WRITE,
2642 .op_flags = wbc_to_write_flags(wbc),
2643 .old_blkaddr = NULL_ADDR,
2644 .page = page,
2645 .encrypted_page = NULL,
2646 .submitted = false,
2647 .compr_blocks = compr_blocks,
2648 .need_lock = LOCK_RETRY,
2649 .io_type = io_type,
2650 .io_wbc = wbc,
2651 .bio = bio,
2652 .last_block = last_block,
2653 };
2654
2655 trace_f2fs_writepage(page, DATA);
2656
2657
2658 if (unlikely(f2fs_cp_error(sbi))) {
2659 mapping_set_error(page->mapping, -EIO);
2660
2661
2662
2663
2664 if (S_ISDIR(inode->i_mode))
2665 goto redirty_out;
2666 goto out;
2667 }
2668
2669 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2670 goto redirty_out;
2671
2672 if (page->index < end_index ||
2673 f2fs_verity_in_progress(inode) ||
2674 compr_blocks)
2675 goto write;
2676
2677
2678
2679
2680
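	/* skip pages entirely beyond EOF; zero the tail of the last partial page */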
2681 offset = i_size & (PAGE_SIZE - 1);
2682 if ((page->index >= end_index + 1) || !offset)
2683 goto out;
2684
2685 zero_user_segment(page, offset, PAGE_SIZE);
2686write:
2687 if (f2fs_is_drop_cache(inode))
2688 goto out;
2689
2690 if (f2fs_is_volatile_file(inode) && (!page->index ||
2691 (!wbc->for_reclaim &&
2692 f2fs_available_free_memory(sbi, BASE_CHECK))))
2693 goto redirty_out;
2694
2695
2696 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2697
2698
2699
2700
2701
2702 if (IS_NOQUOTA(inode))
2703 down_read(&sbi->node_write);
2704
2705 fio.need_lock = LOCK_DONE;
2706 err = f2fs_do_write_data_page(&fio);
2707
2708 if (IS_NOQUOTA(inode))
2709 up_read(&sbi->node_write);
2710
2711 goto done;
2712 }
2713
2714 if (!wbc->for_reclaim)
2715 need_balance_fs = true;
2716 else if (has_not_enough_free_secs(sbi, 0, 0))
2717 goto redirty_out;
2718 else
2719 set_inode_flag(inode, FI_HOT_DATA);
2720
2721 err = -EAGAIN;
2722 if (f2fs_has_inline_data(inode)) {
2723 err = f2fs_write_inline_data(inode, page);
2724 if (!err)
2725 goto out;
2726 }
2727
2728 if (err == -EAGAIN) {
2729 err = f2fs_do_write_data_page(&fio);
2730 if (err == -EAGAIN) {
2731 fio.need_lock = LOCK_REQ;
2732 err = f2fs_do_write_data_page(&fio);
2733 }
2734 }
2735
2736 if (err) {
2737 file_set_keep_isize(inode);
2738 } else {
2739 spin_lock(&F2FS_I(inode)->i_size_lock);
2740 if (F2FS_I(inode)->last_disk_size < psize)
2741 F2FS_I(inode)->last_disk_size = psize;
2742 spin_unlock(&F2FS_I(inode)->i_size_lock);
2743 }
2744
2745done:
2746 if (err && err != -ENOENT)
2747 goto redirty_out;
2748
2749out:
2750 inode_dec_dirty_pages(inode);
2751 if (err) {
2752 ClearPageUptodate(page);
2753 clear_cold_data(page);
2754 }
2755
2756 if (wbc->for_reclaim) {
2757 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2758 clear_inode_flag(inode, FI_HOT_DATA);
2759 f2fs_remove_dirty_inode(inode);
2760 submitted = NULL;
2761 }
2762 unlock_page(page);
2763 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2764 !F2FS_I(inode)->cp_task && allow_balance)
2765 f2fs_balance_fs(sbi, need_balance_fs);
2766
2767 if (unlikely(f2fs_cp_error(sbi))) {
2768 f2fs_submit_merged_write(sbi, DATA);
2769 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2770 submitted = NULL;
2771 }
2772
2773 if (submitted)
2774 *submitted = fio.submitted ? 1 : 0;
2775
2776 return 0;
2777
2778redirty_out:
2779 redirty_page_for_writepage(wbc, page);
2780
2781
2782
2783
2784
2785
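	/* keep the page locked so the caller re-activates it instead of treating this as an I/O error */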
2786 if (!err || wbc->for_reclaim)
2787 return AOP_WRITEPAGE_ACTIVATE;
2788 unlock_page(page);
2789 return err;
2790}
2791
2792static int f2fs_write_data_page(struct page *page,
2793 struct writeback_control *wbc)
2794{
2795#ifdef CONFIG_F2FS_FS_COMPRESSION
2796 struct inode *inode = page->mapping->host;
2797
2798 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2799 goto out;
2800
2801 if (f2fs_compressed_file(inode)) {
2802 if (f2fs_is_compressed_cluster(inode, page->index)) {
2803 redirty_page_for_writepage(wbc, page);
2804 return AOP_WRITEPAGE_ACTIVATE;
2805 }
2806 }
2807out:
2808#endif
2809
2810 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2811 wbc, FS_DATA_IO, 0, true);
2812}
2813
2814
2815
2816
2817
2818
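/*
 * Write-back walk over the page cache, modeled on write_cache_pages() in
 * mm/page-writeback.c and extended to gather compressed clusters and to
 * merge bios across pages before submission.
 */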
2819static int f2fs_write_cache_pages(struct address_space *mapping,
2820 struct writeback_control *wbc,
2821 enum iostat_type io_type)
2822{
2823 int ret = 0;
2824 int done = 0, retry = 0;
2825 struct pagevec pvec;
2826 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2827 struct bio *bio = NULL;
2828 sector_t last_block;
2829#ifdef CONFIG_F2FS_FS_COMPRESSION
2830 struct inode *inode = mapping->host;
2831 struct compress_ctx cc = {
2832 .inode = inode,
2833 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2834 .cluster_size = F2FS_I(inode)->i_cluster_size,
2835 .cluster_idx = NULL_CLUSTER,
2836 .rpages = NULL,
2837 .nr_rpages = 0,
2838 .cpages = NULL,
2839 .rbuf = NULL,
2840 .cbuf = NULL,
2841 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2842 .private = NULL,
2843 };
2844#endif
2845 int nr_pages;
2846 pgoff_t index;
2847 pgoff_t end;
2848 pgoff_t done_index;
2849 int range_whole = 0;
2850 xa_mark_t tag;
2851 int nwritten = 0;
2852 int submitted = 0;
2853 int i;
2854
2855 pagevec_init(&pvec);
2856
2857 if (get_dirty_pages(mapping->host) <=
2858 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2859 set_inode_flag(mapping->host, FI_HOT_DATA);
2860 else
2861 clear_inode_flag(mapping->host, FI_HOT_DATA);
2862
2863 if (wbc->range_cyclic) {
2864 index = mapping->writeback_index;
2865 end = -1;
2866 } else {
2867 index = wbc->range_start >> PAGE_SHIFT;
2868 end = wbc->range_end >> PAGE_SHIFT;
2869 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2870 range_whole = 1;
2871 }
2872 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2873 tag = PAGECACHE_TAG_TOWRITE;
2874 else
2875 tag = PAGECACHE_TAG_DIRTY;
2876retry:
2877 retry = 0;
2878 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2879 tag_pages_for_writeback(mapping, index, end);
2880 done_index = index;
2881 while (!done && !retry && (index <= end)) {
2882 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2883 tag);
2884 if (nr_pages == 0)
2885 break;
2886
2887 for (i = 0; i < nr_pages; i++) {
2888 struct page *page = pvec.pages[i];
2889 bool need_readd;
2890readd:
2891 need_readd = false;
2892#ifdef CONFIG_F2FS_FS_COMPRESSION
2893 if (f2fs_compressed_file(inode)) {
2894 ret = f2fs_init_compress_ctx(&cc);
2895 if (ret) {
2896 done = 1;
2897 break;
2898 }
2899
2900 if (!f2fs_cluster_can_merge_page(&cc,
2901 page->index)) {
2902 ret = f2fs_write_multi_pages(&cc,
2903 &submitted, wbc, io_type);
2904 if (!ret)
2905 need_readd = true;
2906 goto result;
2907 }
2908
2909 if (unlikely(f2fs_cp_error(sbi)))
2910 goto lock_page;
2911
2912 if (f2fs_cluster_is_empty(&cc)) {
2913 void *fsdata = NULL;
2914 struct page *pagep;
2915 int ret2;
2916
2917 ret2 = f2fs_prepare_compress_overwrite(
2918 inode, &pagep,
2919 page->index, &fsdata);
2920 if (ret2 < 0) {
2921 ret = ret2;
2922 done = 1;
2923 break;
2924 } else if (ret2 &&
2925 !f2fs_compress_write_end(inode,
2926 fsdata, page->index,
2927 1)) {
2928 retry = 1;
2929 break;
2930 }
2931 } else {
2932 goto lock_page;
2933 }
2934 }
2935#endif
2936
2937 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
2938 wbc->sync_mode == WB_SYNC_NONE) {
2939 done = 1;
2940 break;
2941 }
2942#ifdef CONFIG_F2FS_FS_COMPRESSION
2943lock_page:
2944#endif
2945 done_index = page->index;
2946retry_write:
2947 lock_page(page);
2948
2949 if (unlikely(page->mapping != mapping)) {
2950continue_unlock:
2951 unlock_page(page);
2952 continue;
2953 }
2954
2955 if (!PageDirty(page)) {
2956
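				/* someone else already wrote the page out for us */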
2957 goto continue_unlock;
2958 }
2959
2960 if (PageWriteback(page)) {
2961 if (wbc->sync_mode != WB_SYNC_NONE)
2962 f2fs_wait_on_page_writeback(page,
2963 DATA, true, true);
2964 else
2965 goto continue_unlock;
2966 }
2967
2968 if (!clear_page_dirty_for_io(page))
2969 goto continue_unlock;
2970
2971#ifdef CONFIG_F2FS_FS_COMPRESSION
2972 if (f2fs_compressed_file(inode)) {
2973 get_page(page);
2974 f2fs_compress_ctx_add_page(&cc, page);
2975 continue;
2976 }
2977#endif
2978 ret = f2fs_write_single_data_page(page, &submitted,
2979 &bio, &last_block, wbc, io_type,
2980 0, true);
2981 if (ret == AOP_WRITEPAGE_ACTIVATE)
2982 unlock_page(page);
2983#ifdef CONFIG_F2FS_FS_COMPRESSION
2984result:
2985#endif
2986 nwritten += submitted;
2987 wbc->nr_to_write -= submitted;
2988
2989 if (unlikely(ret)) {
2990
2991
2992
2993
2994 if (ret == AOP_WRITEPAGE_ACTIVATE) {
2995 ret = 0;
2996 goto next;
2997 } else if (ret == -EAGAIN) {
2998 ret = 0;
2999 if (wbc->sync_mode == WB_SYNC_ALL) {
3000 cond_resched();
3001 congestion_wait(BLK_RW_ASYNC,
3002 DEFAULT_IO_TIMEOUT);
3003 goto retry_write;
3004 }
3005 goto next;
3006 }
3007 done_index = page->index + 1;
3008 done = 1;
3009 break;
3010 }
3011
3012 if (wbc->nr_to_write <= 0 &&
3013 wbc->sync_mode == WB_SYNC_NONE) {
3014 done = 1;
3015 break;
3016 }
3017next:
3018 if (need_readd)
3019 goto readd;
3020 }
3021 pagevec_release(&pvec);
3022 cond_resched();
3023 }
3024#ifdef CONFIG_F2FS_FS_COMPRESSION
3025
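	/* flush the cluster that is still being assembled */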
3026 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3027 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3028 nwritten += submitted;
3029 wbc->nr_to_write -= submitted;
3030 if (ret) {
3031 done = 1;
3032 retry = 0;
3033 }
3034 }
3035 if (f2fs_compressed_file(inode))
3036 f2fs_destroy_compress_ctx(&cc, false);
3037#endif
3038 if (retry) {
3039 index = 0;
3040 end = -1;
3041 goto retry;
3042 }
3043 if (wbc->range_cyclic && !done)
3044 done_index = 0;
3045 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3046 mapping->writeback_index = done_index;
3047
3048 if (nwritten)
3049 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3050 NULL, 0, DATA);
3051
3052 if (bio)
3053 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3054
3055 return ret;
3056}
3057
3058static inline bool __should_serialize_io(struct inode *inode,
3059 struct writeback_control *wbc)
3060{
3061
3062 if (F2FS_I(inode)->cp_task)
3063 return false;
3064
3065 if (!S_ISREG(inode->i_mode))
3066 return false;
3067 if (IS_NOQUOTA(inode))
3068 return false;
3069
3070 if (f2fs_need_compress_data(inode))
3071 return true;
3072 if (wbc->sync_mode != WB_SYNC_ALL)
3073 return true;
3074 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3075 return true;
3076 return false;
3077}
3078
3079static int __f2fs_write_data_pages(struct address_space *mapping,
3080 struct writeback_control *wbc,
3081 enum iostat_type io_type)
3082{
3083 struct inode *inode = mapping->host;
3084 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3085 struct blk_plug plug;
3086 int ret;
3087 bool locked = false;
3088
3089
3090 if (!mapping->a_ops->writepage)
3091 return 0;
3092
3093
3094 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3095 return 0;
3096
3097
3098 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3099 goto skip_write;
3100
3101 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3102 wbc->sync_mode == WB_SYNC_NONE &&
3103 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3104 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3105 goto skip_write;
3106
3107
3108 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3109 goto skip_write;
3110
3111 trace_f2fs_writepages(mapping->host, wbc, DATA);
3112
3113
3114 if (wbc->sync_mode == WB_SYNC_ALL)
3115 atomic_inc(&sbi->wb_sync_req[DATA]);
3116 else if (atomic_read(&sbi->wb_sync_req[DATA]))
3117 goto skip_write;
3118
3119 if (__should_serialize_io(inode, wbc)) {
3120 mutex_lock(&sbi->writepages);
3121 locked = true;
3122 }
3123
3124 blk_start_plug(&plug);
3125 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3126 blk_finish_plug(&plug);
3127
3128 if (locked)
3129 mutex_unlock(&sbi->writepages);
3130
3131 if (wbc->sync_mode == WB_SYNC_ALL)
3132 atomic_dec(&sbi->wb_sync_req[DATA]);
3133
3134
3135
3136
3137
3138 f2fs_remove_dirty_inode(inode);
3139 return ret;
3140
3141skip_write:
3142 wbc->pages_skipped += get_dirty_pages(inode);
3143 trace_f2fs_writepages(mapping->host, wbc, DATA);
3144 return 0;
3145}
3146
3147static int f2fs_write_data_pages(struct address_space *mapping,
3148 struct writeback_control *wbc)
3149{
3150 struct inode *inode = mapping->host;
3151
3152 return __f2fs_write_data_pages(mapping, wbc,
3153 F2FS_I(inode)->cp_task == current ?
3154 FS_CP_DATA_IO : FS_DATA_IO);
3155}
3156
3157static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3158{
3159 struct inode *inode = mapping->host;
3160 loff_t i_size = i_size_read(inode);
3161
3162 if (IS_NOQUOTA(inode))
3163 return;
3164
3165
3166 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3167 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3168 down_write(&F2FS_I(inode)->i_mmap_sem);
3169
3170 truncate_pagecache(inode, i_size);
3171 f2fs_truncate_blocks(inode, i_size, true);
3172
3173 up_write(&F2FS_I(inode)->i_mmap_sem);
3174 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3175 }
3176}
3177
3178static int prepare_write_begin(struct f2fs_sb_info *sbi,
3179 struct page *page, loff_t pos, unsigned len,
3180 block_t *blk_addr, bool *node_changed)
3181{
3182 struct inode *inode = page->mapping->host;
3183 pgoff_t index = page->index;
3184 struct dnode_of_data dn;
3185 struct page *ipage;
3186 bool locked = false;
3187 struct extent_info ei = {0,0,0};
3188 int err = 0;
3189 int flag;
3190
3191
3192
3193
3194
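	/* a plain full-page overwrite needs no block address up front */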
3195 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3196 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3197 !f2fs_verity_in_progress(inode))
3198 return 0;
3199
3200
3201 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3202 flag = F2FS_GET_BLOCK_DEFAULT;
3203 else
3204 flag = F2FS_GET_BLOCK_PRE_AIO;
3205
3206 if (f2fs_has_inline_data(inode) ||
3207 (pos & PAGE_MASK) >= i_size_read(inode)) {
3208 f2fs_do_map_lock(sbi, flag, true);
3209 locked = true;
3210 }
3211
3212restart:
3213
3214 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3215 if (IS_ERR(ipage)) {
3216 err = PTR_ERR(ipage);
3217 goto unlock_out;
3218 }
3219
3220 set_new_dnode(&dn, inode, ipage, ipage, 0);
3221
3222 if (f2fs_has_inline_data(inode)) {
3223 if (pos + len <= MAX_INLINE_DATA(inode)) {
3224 f2fs_do_read_inline_data(page, ipage);
3225 set_inode_flag(inode, FI_DATA_EXIST);
3226 if (inode->i_nlink)
3227 set_inline_node(ipage);
3228 } else {
3229 err = f2fs_convert_inline_page(&dn, page);
3230 if (err)
3231 goto out;
3232 if (dn.data_blkaddr == NULL_ADDR)
3233 err = f2fs_get_block(&dn, index);
3234 }
3235 } else if (locked) {
3236 err = f2fs_get_block(&dn, index);
3237 } else {
3238 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3239 dn.data_blkaddr = ei.blk + index - ei.fofs;
3240 } else {
3241
3242 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3243 if (err || dn.data_blkaddr == NULL_ADDR) {
3244 f2fs_put_dnode(&dn);
3245 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3246 true);
3247 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3248 locked = true;
3249 goto restart;
3250 }
3251 }
3252 }
3253
3254
3255 *blk_addr = dn.data_blkaddr;
3256 *node_changed = dn.node_changed;
3257out:
3258 f2fs_put_dnode(&dn);
3259unlock_out:
3260 if (locked)
3261 f2fs_do_map_lock(sbi, flag, false);
3262 return err;
3263}
3264
3265static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3266 loff_t pos, unsigned len, unsigned flags,
3267 struct page **pagep, void **fsdata)
3268{
3269 struct inode *inode = mapping->host;
3270 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3271 struct page *page = NULL;
3272 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3273 bool need_balance = false, drop_atomic = false;
3274 block_t blkaddr = NULL_ADDR;
3275 int err = 0;
3276
3277 trace_f2fs_write_begin(inode, pos, len, flags);
3278
3279 if (!f2fs_is_checkpoint_ready(sbi)) {
3280 err = -ENOSPC;
3281 goto fail;
3282 }
3283
3284 if ((f2fs_is_atomic_file(inode) &&
3285 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3286 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3287 err = -ENOMEM;
3288 drop_atomic = true;
3289 goto fail;
3290 }
3291
3292
3293
3294
3295
3296
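	/* a write beyond page #0 can never stay inline, so convert the inline inode first */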
3297 if (index != 0) {
3298 err = f2fs_convert_inline_inode(inode);
3299 if (err)
3300 goto fail;
3301 }
3302
3303#ifdef CONFIG_F2FS_FS_COMPRESSION
3304 if (f2fs_compressed_file(inode)) {
3305 int ret;
3306
3307 *fsdata = NULL;
3308
3309 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3310 index, fsdata);
3311 if (ret < 0) {
3312 err = ret;
3313 goto fail;
3314 } else if (ret) {
3315 return 0;
3316 }
3317 }
3318#endif
3319
3320repeat:
3321
3322
3323
3324
3325 page = f2fs_pagecache_get_page(mapping, index,
3326 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3327 if (!page) {
3328 err = -ENOMEM;
3329 goto fail;
3330 }
3331
3332
3333
3334 *pagep = page;
3335
3336 err = prepare_write_begin(sbi, page, pos, len,
3337 &blkaddr, &need_balance);
3338 if (err)
3339 goto fail;
3340
3341 if (need_balance && !IS_NOQUOTA(inode) &&
3342 has_not_enough_free_secs(sbi, 0, 0)) {
3343 unlock_page(page);
3344 f2fs_balance_fs(sbi, true);
3345 lock_page(page);
3346 if (page->mapping != mapping) {
3347
3348 f2fs_put_page(page, 1);
3349 goto repeat;
3350 }
3351 }
3352
3353 f2fs_wait_on_page_writeback(page, DATA, false, true);
3354
3355 if (len == PAGE_SIZE || PageUptodate(page))
3356 return 0;
3357
3358 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3359 !f2fs_verity_in_progress(inode)) {
3360 zero_user_segment(page, len, PAGE_SIZE);
3361 return 0;
3362 }
3363
3364 if (blkaddr == NEW_ADDR) {
3365 zero_user_segment(page, 0, PAGE_SIZE);
3366 SetPageUptodate(page);
3367 } else {
3368 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3369 DATA_GENERIC_ENHANCE_READ)) {
3370 err = -EFSCORRUPTED;
3371 goto fail;
3372 }
3373 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3374 if (err)
3375 goto fail;
3376
3377 lock_page(page);
3378 if (unlikely(page->mapping != mapping)) {
3379 f2fs_put_page(page, 1);
3380 goto repeat;
3381 }
3382 if (unlikely(!PageUptodate(page))) {
3383 err = -EIO;
3384 goto fail;
3385 }
3386 }
3387 return 0;
3388
3389fail:
3390 f2fs_put_page(page, 1);
3391 f2fs_write_failed(mapping, pos + len);
3392 if (drop_atomic)
3393 f2fs_drop_inmem_pages_all(sbi, false);
3394 return err;
3395}
3396
3397static int f2fs_write_end(struct file *file,
3398 struct address_space *mapping,
3399 loff_t pos, unsigned len, unsigned copied,
3400 struct page *page, void *fsdata)
3401{
3402 struct inode *inode = page->mapping->host;
3403
3404 trace_f2fs_write_end(inode, pos, len, copied);
3405
3406
3407
3408
3409
3410
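	/* a short copy into a !Uptodate page cannot be kept; report 0 copied so the write is retried */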
3411 if (!PageUptodate(page)) {
3412 if (unlikely(copied != len))
3413 copied = 0;
3414 else
3415 SetPageUptodate(page);
3416 }
3417
3418#ifdef CONFIG_F2FS_FS_COMPRESSION
3419
3420 if (f2fs_compressed_file(inode) && fsdata) {
3421 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3422 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3423
3424 if (pos + copied > i_size_read(inode) &&
3425 !f2fs_verity_in_progress(inode))
3426 f2fs_i_size_write(inode, pos + copied);
3427 return copied;
3428 }
3429#endif
3430
3431 if (!copied)
3432 goto unlock_out;
3433
3434 set_page_dirty(page);
3435
3436 if (pos + copied > i_size_read(inode) &&
3437 !f2fs_verity_in_progress(inode))
3438 f2fs_i_size_write(inode, pos + copied);
3439unlock_out:
3440 f2fs_put_page(page, 1);
3441 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3442 return copied;
3443}
3444
3445static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3446 loff_t offset)
3447{
3448 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3449 unsigned blkbits = i_blkbits;
3450 unsigned blocksize_mask = (1 << blkbits) - 1;
3451 unsigned long align = offset | iov_iter_alignment(iter);
3452 struct block_device *bdev = inode->i_sb->s_bdev;
3453
3454 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3455 return 1;
3456
3457 if (align & blocksize_mask) {
3458 if (bdev)
3459 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3460 blocksize_mask = (1 << blkbits) - 1;
3461 if (align & blocksize_mask)
3462 return -EINVAL;
3463 return 1;
3464 }
3465 return 0;
3466}
3467
3468static void f2fs_dio_end_io(struct bio *bio)
3469{
3470 struct f2fs_private_dio *dio = bio->bi_private;
3471
3472 dec_page_count(F2FS_I_SB(dio->inode),
3473 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3474
3475 bio->bi_private = dio->orig_private;
3476 bio->bi_end_io = dio->orig_end_io;
3477
3478 kfree(dio);
3479
3480 bio_endio(bio);
3481}
3482
3483static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3484 loff_t file_offset)
3485{
3486 struct f2fs_private_dio *dio;
3487 bool write = (bio_op(bio) == REQ_OP_WRITE);
3488
3489 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3490 sizeof(struct f2fs_private_dio), GFP_NOFS);
3491 if (!dio)
3492 goto out;
3493
3494 dio->inode = inode;
3495 dio->orig_end_io = bio->bi_end_io;
3496 dio->orig_private = bio->bi_private;
3497 dio->write = write;
3498
3499 bio->bi_end_io = f2fs_dio_end_io;
3500 bio->bi_private = dio;
3501
3502 inc_page_count(F2FS_I_SB(inode),
3503 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3504
3505 submit_bio(bio);
3506 return;
3507out:
3508 bio->bi_status = BLK_STS_IOERR;
3509 bio_endio(bio);
3510}
3511
3512static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3513{
3514 struct address_space *mapping = iocb->ki_filp->f_mapping;
3515 struct inode *inode = mapping->host;
3516 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3517 struct f2fs_inode_info *fi = F2FS_I(inode);
3518 size_t count = iov_iter_count(iter);
3519 loff_t offset = iocb->ki_pos;
3520 int rw = iov_iter_rw(iter);
3521 int err;
3522 enum rw_hint hint = iocb->ki_hint;
3523 int whint_mode = F2FS_OPTION(sbi).whint_mode;
3524 bool do_opu;
3525
3526 err = check_direct_IO(inode, iter, offset);
3527 if (err)
3528 return err < 0 ? err : 0;
3529
3530 if (f2fs_force_buffered_io(inode, iocb, iter))
3531 return 0;
3532
3533 do_opu = allow_outplace_dio(inode, iocb, iter);
3534
3535 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3536
3537 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3538 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3539
3540 if (iocb->ki_flags & IOCB_NOWAIT) {
3541 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3542 iocb->ki_hint = hint;
3543 err = -EAGAIN;
3544 goto out;
3545 }
3546 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3547 up_read(&fi->i_gc_rwsem[rw]);
3548 iocb->ki_hint = hint;
3549 err = -EAGAIN;
3550 goto out;
3551 }
3552 } else {
3553 down_read(&fi->i_gc_rwsem[rw]);
3554 if (do_opu)
3555 down_read(&fi->i_gc_rwsem[READ]);
3556 }
3557
3558 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3559 iter, rw == WRITE ? get_data_block_dio_write :
3560 get_data_block_dio, NULL, f2fs_dio_submit_bio,
3561 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3562 DIO_SKIP_HOLES);
3563
3564 if (do_opu)
3565 up_read(&fi->i_gc_rwsem[READ]);
3566
3567 up_read(&fi->i_gc_rwsem[rw]);
3568
3569 if (rw == WRITE) {
3570 if (whint_mode == WHINT_MODE_OFF)
3571 iocb->ki_hint = hint;
3572 if (err > 0) {
3573 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3574 err);
3575 if (!do_opu)
3576 set_inode_flag(inode, FI_UPDATE_WRITE);
3577 } else if (err == -EIOCBQUEUED) {
3578 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3579 count - iov_iter_count(iter));
3580 } else if (err < 0) {
3581 f2fs_write_failed(mapping, offset + count);
3582 }
3583 } else {
3584 if (err > 0)
3585 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3586 else if (err == -EIOCBQUEUED)
3587 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3588 count - iov_iter_count(iter));
3589 }
3590
3591out:
3592 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3593
3594 return err;
3595}
3596
3597void f2fs_invalidate_page(struct page *page, unsigned int offset,
3598 unsigned int length)
3599{
3600 struct inode *inode = page->mapping->host;
3601 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3602
3603 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3604 (offset % PAGE_SIZE || length != PAGE_SIZE))
3605 return;
3606
3607 if (PageDirty(page)) {
3608 if (inode->i_ino == F2FS_META_INO(sbi)) {
3609 dec_page_count(sbi, F2FS_DIRTY_META);
3610 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3611 dec_page_count(sbi, F2FS_DIRTY_NODES);
3612 } else {
3613 inode_dec_dirty_pages(inode);
3614 f2fs_remove_dirty_inode(inode);
3615 }
3616 }
3617
3618 clear_cold_data(page);
3619
3620 if (IS_ATOMIC_WRITTEN_PAGE(page))
3621 return f2fs_drop_inmem_page(inode, page);
3622
3623 f2fs_clear_page_private(page);
3624}
3625
3626int f2fs_release_page(struct page *page, gfp_t wait)
3627{
3628
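	/* never release a dirty page */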
3629 if (PageDirty(page))
3630 return 0;
3631
3632
3633 if (IS_ATOMIC_WRITTEN_PAGE(page))
3634 return 0;
3635
3636 clear_cold_data(page);
3637 f2fs_clear_page_private(page);
3638 return 1;
3639}
3640
3641static int f2fs_set_data_page_dirty(struct page *page)
3642{
3643 struct inode *inode = page_file_mapping(page)->host;
3644
3645 trace_f2fs_set_page_dirty(page, DATA);
3646
3647 if (!PageUptodate(page))
3648 SetPageUptodate(page);
3649 if (PageSwapCache(page))
3650 return __set_page_dirty_nobuffers(page);
3651
3652 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3653 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
3654 f2fs_register_inmem_page(inode, page);
3655 return 1;
3656 }
3657
3658
3659
3660
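		/* the page is already registered as an atomic (in-memory) page */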
3661 return 0;
3662 }
3663
3664 if (!PageDirty(page)) {
3665 __set_page_dirty_nobuffers(page);
3666 f2fs_update_dirty_page(inode, page);
3667 return 1;
3668 }
3669 return 0;
3670}
3671
3672
3673static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3674{
3675#ifdef CONFIG_F2FS_FS_COMPRESSION
3676 struct dnode_of_data dn;
3677 sector_t start_idx, blknr = 0;
3678 int ret;
3679
3680 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3681
3682 set_new_dnode(&dn, inode, NULL, NULL, 0);
3683 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3684 if (ret)
3685 return 0;
3686
3687 if (dn.data_blkaddr != COMPRESS_ADDR) {
3688 dn.ofs_in_node += block - start_idx;
3689 blknr = f2fs_data_blkaddr(&dn);
3690 if (!__is_valid_data_blkaddr(blknr))
3691 blknr = 0;
3692 }
3693
3694 f2fs_put_dnode(&dn);
3695 return blknr;
3696#else
3697 return 0;
3698#endif
3699}
3700
3701
3702static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3703{
3704 struct inode *inode = mapping->host;
3705 sector_t blknr = 0;
3706
3707 if (f2fs_has_inline_data(inode))
3708 goto out;
3709
3710
3711 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3712 filemap_write_and_wait(mapping);
3713
3714
3715 if (unlikely(block >= max_file_blocks(inode)))
3716 goto out;
3717
3718 if (f2fs_compressed_file(inode)) {
3719 blknr = f2fs_bmap_compress(inode, block);
3720 } else {
3721 struct f2fs_map_blocks map;
3722
3723 memset(&map, 0, sizeof(map));
3724 map.m_lblk = block;
3725 map.m_len = 1;
3726 map.m_next_pgofs = NULL;
3727 map.m_seg_type = NO_CHECK_TYPE;
3728
3729 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3730 blknr = map.m_pblk;
3731 }
3732out:
3733 trace_f2fs_bmap(inode, block, blknr);
3734 return blknr;
3735}
3736
3737#ifdef CONFIG_MIGRATION
3738#include <linux/migrate.h>
3739
3740int f2fs_migrate_page(struct address_space *mapping,
3741 struct page *newpage, struct page *page, enum migrate_mode mode)
3742{
3743 int rc, extra_count;
3744 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3745 bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
3746
3747 BUG_ON(PageWriteback(page));
3748
3749
3750 if (atomic_written) {
3751 if (mode != MIGRATE_SYNC)
3752 return -EBUSY;
3753 if (!mutex_trylock(&fi->inmem_lock))
3754 return -EAGAIN;
3755 }
3756
3757
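	/* atomic written pages hold one extra reference via the inmem list */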
3758 extra_count = atomic_written ? 1 : 0;
3759 rc = migrate_page_move_mapping(mapping, newpage,
3760 page, extra_count);
3761 if (rc != MIGRATEPAGE_SUCCESS) {
3762 if (atomic_written)
3763 mutex_unlock(&fi->inmem_lock);
3764 return rc;
3765 }
3766
3767 if (atomic_written) {
3768 struct inmem_pages *cur;
3769
3770 list_for_each_entry(cur, &fi->inmem_pages, list)
3771 if (cur->page == page) {
3772 cur->page = newpage;
3773 break;
3774 }
3775 mutex_unlock(&fi->inmem_lock);
3776 put_page(page);
3777 get_page(newpage);
3778 }
3779
3780 if (PagePrivate(page)) {
3781 f2fs_set_page_private(newpage, page_private(page));
3782 f2fs_clear_page_private(page);
3783 }
3784
3785 if (mode != MIGRATE_SYNC_NO_COPY)
3786 migrate_page_copy(newpage, page);
3787 else
3788 migrate_page_states(newpage, page);
3789
3790 return MIGRATEPAGE_SUCCESS;
3791}
3792#endif
3793
3794#ifdef CONFIG_SWAP
3795static int f2fs_is_file_aligned(struct inode *inode)
3796{
3797 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3798 block_t main_blkaddr = SM_I(sbi)->main_blkaddr;
3799 block_t cur_lblock;
3800 block_t last_lblock;
3801 block_t pblock;
3802 unsigned long nr_pblocks;
3803 unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
3804 unsigned int not_aligned = 0;
3805 int ret = 0;
3806
3807 cur_lblock = 0;
3808 last_lblock = bytes_to_blks(inode, i_size_read(inode));
3809
3810 while (cur_lblock < last_lblock) {
3811 struct f2fs_map_blocks map;
3812
3813 memset(&map, 0, sizeof(map));
3814 map.m_lblk = cur_lblock;
3815 map.m_len = last_lblock - cur_lblock;
3816 map.m_next_pgofs = NULL;
3817 map.m_next_extent = NULL;
3818 map.m_seg_type = NO_CHECK_TYPE;
3819 map.m_may_create = false;
3820
3821 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
3822 if (ret)
3823 goto out;
3824
3825
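		/* an unmapped range means the swapfile has a hole */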
3826 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3827 f2fs_err(sbi, "Swapfile has holes");
3828 ret = -ENOENT;
3829 goto out;
3830 }
3831
3832 pblock = map.m_pblk;
3833 nr_pblocks = map.m_len;
3834
3835 if ((pblock - main_blkaddr) & (blocks_per_sec - 1) ||
3836 nr_pblocks & (blocks_per_sec - 1)) {
3837 if (f2fs_is_pinned_file(inode)) {
3838 f2fs_err(sbi, "Swapfile does not align to section");
3839 ret = -EINVAL;
3840 goto out;
3841 }
3842 not_aligned++;
3843 }
3844
3845 cur_lblock += nr_pblocks;
3846 }
3847 if (not_aligned)
3848 f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: \n"
3849 "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
3850 not_aligned);
3851out:
3852 return ret;
3853}
3854
3855static int check_swap_activate_fast(struct swap_info_struct *sis,
3856 struct file *swap_file, sector_t *span)
3857{
3858 struct address_space *mapping = swap_file->f_mapping;
3859 struct inode *inode = mapping->host;
3860 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3861 sector_t cur_lblock;
3862 sector_t last_lblock;
3863 sector_t pblock;
3864 sector_t lowest_pblock = -1;
3865 sector_t highest_pblock = 0;
3866 int nr_extents = 0;
3867 unsigned long nr_pblocks;
3868 unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
3869 unsigned int not_aligned = 0;
3870 int ret = 0;
3871
3872
3873
3874
3875
3876 cur_lblock = 0;
3877 last_lblock = bytes_to_blks(inode, i_size_read(inode));
3878
3879 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
3880 struct f2fs_map_blocks map;
3881
3882 cond_resched();
3883
3884 memset(&map, 0, sizeof(map));
3885 map.m_lblk = cur_lblock;
3886 map.m_len = last_lblock - cur_lblock;
3887 map.m_next_pgofs = NULL;
3888 map.m_next_extent = NULL;
3889 map.m_seg_type = NO_CHECK_TYPE;
3890 map.m_may_create = false;
3891
3892 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
3893 if (ret)
3894 goto out;
3895
3896
3897 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3898 f2fs_err(sbi, "Swapfile has holes");
3899 ret = -EINVAL;
3900 goto out;
3901 }
3902
3903 pblock = map.m_pblk;
3904 nr_pblocks = map.m_len;
3905
3906 if ((pblock - SM_I(sbi)->main_blkaddr) & (blocks_per_sec - 1) ||
3907 nr_pblocks & (blocks_per_sec - 1)) {
3908 if (f2fs_is_pinned_file(inode)) {
3909 f2fs_err(sbi, "Swapfile does not align to section");
3910 ret = -EINVAL;
3911 goto out;
3912 }
3913 not_aligned++;
3914 }
3915
3916 if (cur_lblock + nr_pblocks >= sis->max)
3917 nr_pblocks = sis->max - cur_lblock;
3918
3919 if (cur_lblock) {
3920 if (pblock < lowest_pblock)
3921 lowest_pblock = pblock;
3922 if (pblock + nr_pblocks - 1 > highest_pblock)
3923 highest_pblock = pblock + nr_pblocks - 1;
3924 }
3925
3926
3927
3928
3929 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
3930 if (ret < 0)
3931 goto out;
3932 nr_extents += ret;
3933 cur_lblock += nr_pblocks;
3934 }
3935 ret = nr_extents;
3936 *span = 1 + highest_pblock - lowest_pblock;
3937 if (cur_lblock == 0)
3938 cur_lblock = 1;
3939 sis->max = cur_lblock;
3940 sis->pages = cur_lblock - 1;
3941 sis->highest_bit = cur_lblock - 1;
3942
3943 if (not_aligned)
3944 f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: \n"
3945 "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
3946 not_aligned);
3947out:
3948 return ret;
3949}
3950
3951
3952static int check_swap_activate(struct swap_info_struct *sis,
3953 struct file *swap_file, sector_t *span)
3954{
3955 struct address_space *mapping = swap_file->f_mapping;
3956 struct inode *inode = mapping->host;
3957 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3958 unsigned blocks_per_page;
3959 unsigned long page_no;
3960 sector_t probe_block;
3961 sector_t last_block;
3962 sector_t lowest_block = -1;
3963 sector_t highest_block = 0;
3964 int nr_extents = 0;
3965 int ret = 0;
3966
3967 if (PAGE_SIZE == F2FS_BLKSIZE)
3968 return check_swap_activate_fast(sis, swap_file, span);
3969
3970 ret = f2fs_is_file_aligned(inode);
3971 if (ret)
3972 goto out;
3973
3974 blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);
3975
3976
3977
3978
3979
3980 probe_block = 0;
3981 page_no = 0;
3982 last_block = bytes_to_blks(inode, i_size_read(inode));
3983 while ((probe_block + blocks_per_page) <= last_block &&
3984 page_no < sis->max) {
3985 unsigned block_in_page;
3986 sector_t first_block;
3987 sector_t block = 0;
3988
3989 cond_resched();
3990
3991 block = probe_block;
3992 ret = bmap(inode, &block);
3993 if (ret)
3994 goto out;
3995 if (!block)
3996 goto bad_bmap;
3997 first_block = block;
3998
3999
4000
4001
4002 if (first_block & (blocks_per_page - 1)) {
4003 probe_block++;
4004 goto reprobe;
4005 }
4006
4007 for (block_in_page = 1; block_in_page < blocks_per_page;
4008 block_in_page++) {
4009
4010 block = probe_block + block_in_page;
4011 ret = bmap(inode, &block);
4012 if (ret)
4013 goto out;
4014 if (!block)
4015 goto bad_bmap;
4016
4017 if (block != first_block + block_in_page) {
4018
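			/* discontiguous on disk: restart probing from the next block */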
4019 probe_block++;
4020 goto reprobe;
4021 }
4022 }
4023
4024 first_block >>= (PAGE_SHIFT - inode->i_blkbits);
4025 if (page_no) {
4026 if (first_block < lowest_block)
4027 lowest_block = first_block;
4028 if (first_block > highest_block)
4029 highest_block = first_block;
4030 }
4031
4032
4033
4034
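		/* found a page-sized, page-aligned run of blocks: add it as one swap extent */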
4035 ret = add_swap_extent(sis, page_no, 1, first_block);
4036 if (ret < 0)
4037 goto out;
4038 nr_extents += ret;
4039 page_no++;
4040 probe_block += blocks_per_page;
4041reprobe:
4042 continue;
4043 }
4044 ret = nr_extents;
4045 *span = 1 + highest_block - lowest_block;
4046 if (page_no == 0)
4047 page_no = 1;
4048 sis->max = page_no;
4049 sis->pages = page_no - 1;
4050 sis->highest_bit = page_no - 1;
4051out:
4052 return ret;
4053bad_bmap:
4054 f2fs_err(sbi, "Swapfile has holes");
4055 return -EINVAL;
4056}
4057
4058static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4059 sector_t *span)
4060{
4061 struct inode *inode = file_inode(file);
4062 int ret;
4063
4064 if (!S_ISREG(inode->i_mode))
4065 return -EINVAL;
4066
4067 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4068 return -EROFS;
4069
4070 ret = f2fs_convert_inline_inode(inode);
4071 if (ret)
4072 return ret;
4073
4074 if (!f2fs_disable_compressed_file(inode))
4075 return -EINVAL;
4076
4077 f2fs_precache_extents(inode);
4078
4079 ret = check_swap_activate(sis, file, span);
4080 if (ret < 0)
4081 return ret;
4082
4083 set_inode_flag(inode, FI_PIN_FILE);
4084 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4085 return ret;
4086}
4087
4088static void f2fs_swap_deactivate(struct file *file)
4089{
4090 struct inode *inode = file_inode(file);
4091
4092 clear_inode_flag(inode, FI_PIN_FILE);
4093}
4094#else
4095static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4096 sector_t *span)
4097{
4098 return -EOPNOTSUPP;
4099}
4100
4101static void f2fs_swap_deactivate(struct file *file)
4102{
4103}
4104#endif
4105
4106const struct address_space_operations f2fs_dblock_aops = {
4107 .readpage = f2fs_read_data_page,
4108 .readahead = f2fs_readahead,
4109 .writepage = f2fs_write_data_page,
4110 .writepages = f2fs_write_data_pages,
4111 .write_begin = f2fs_write_begin,
4112 .write_end = f2fs_write_end,
4113 .set_page_dirty = f2fs_set_data_page_dirty,
4114 .invalidatepage = f2fs_invalidate_page,
4115 .releasepage = f2fs_release_page,
4116 .direct_IO = f2fs_direct_IO,
4117 .bmap = f2fs_bmap,
4118 .swap_activate = f2fs_swap_activate,
4119 .swap_deactivate = f2fs_swap_deactivate,
4120#ifdef CONFIG_MIGRATION
4121 .migratepage = f2fs_migrate_page,
4122#endif
4123};
4124
4125void f2fs_clear_page_cache_dirty_tag(struct page *page)
4126{
4127 struct address_space *mapping = page_mapping(page);
4128 unsigned long flags;
4129
4130 xa_lock_irqsave(&mapping->i_pages, flags);
4131 __xa_clear_mark(&mapping->i_pages, page_index(page),
4132 PAGECACHE_TAG_DIRTY);
4133 xa_unlock_irqrestore(&mapping->i_pages, flags);
4134}
4135
4136int __init f2fs_init_post_read_processing(void)
4137{
4138 bio_post_read_ctx_cache =
4139 kmem_cache_create("f2fs_bio_post_read_ctx",
4140 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4141 if (!bio_post_read_ctx_cache)
4142 goto fail;
4143 bio_post_read_ctx_pool =
4144 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4145 bio_post_read_ctx_cache);
4146 if (!bio_post_read_ctx_pool)
4147 goto fail_free_cache;
4148 return 0;
4149
4150fail_free_cache:
4151 kmem_cache_destroy(bio_post_read_ctx_cache);
4152fail:
4153 return -ENOMEM;
4154}
4155
4156void f2fs_destroy_post_read_processing(void)
4157{
4158 mempool_destroy(bio_post_read_ctx_pool);
4159 kmem_cache_destroy(bio_post_read_ctx_cache);
4160}
4161
4162int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4163{
4164 if (!f2fs_sb_has_encrypt(sbi) &&
4165 !f2fs_sb_has_verity(sbi) &&
4166 !f2fs_sb_has_compression(sbi))
4167 return 0;
4168
4169 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4170 WQ_UNBOUND | WQ_HIGHPRI,
4171 num_online_cpus());
4172 if (!sbi->post_read_wq)
4173 return -ENOMEM;
4174 return 0;
4175}
4176
4177void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4178{
4179 if (sbi->post_read_wq)
4180 destroy_workqueue(sbi->post_read_wq);
4181}
4182
4183int __init f2fs_init_bio_entry_cache(void)
4184{
4185 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4186 sizeof(struct bio_entry));
4187 if (!bio_entry_slab)
4188 return -ENOMEM;
4189 return 0;
4190}
4191
4192void f2fs_destroy_bio_entry_cache(void)
4193{
4194 kmem_cache_destroy(bio_entry_slab);
4195}
4196