// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/stat.h>
11#include <linux/buffer_head.h>
12#include <linux/writeback.h>
13#include <linux/blkdev.h>
14#include <linux/falloc.h>
15#include <linux/types.h>
16#include <linux/compat.h>
17#include <linux/uaccess.h>
18#include <linux/mount.h>
19#include <linux/pagevec.h>
20#include <linux/uio.h>
21#include <linux/uuid.h>
22#include <linux/file.h>
23#include <linux/nls.h>
24#include <linux/sched/signal.h>
25#include <linux/fileattr.h>
26
27#include "f2fs.h"
28#include "node.h"
29#include "segment.h"
30#include "xattr.h"
31#include "acl.h"
32#include "gc.h"
33#include <trace/events/f2fs.h>
34#include <uapi/linux/f2fs.h>
35
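/*
 * Read fault handler for mmapped files: take i_mmap_sem to serialize against
 * truncation/hole punching, fall through to filemap_fault(), and account the
 * mapped read in iostat on success.
 */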
36static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
37{
38 struct inode *inode = file_inode(vmf->vma->vm_file);
39 vm_fault_t ret;
40
41 down_read(&F2FS_I(inode)->i_mmap_sem);
42 ret = filemap_fault(vmf);
43 up_read(&F2FS_I(inode)->i_mmap_sem);
44
45 if (!ret)
46 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
47 F2FS_BLKSIZE);
48
49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50
51 return ret;
52}
53
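/*
 * page_mkwrite handler: before a shared-mmap page may be dirtied, make sure a
 * block is allocated for it (converting inline data and handling compressed
 * clusters first) and zero any part of the page beyond i_size.
 */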
54static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55{
56 struct page *page = vmf->page;
57 struct inode *inode = file_inode(vmf->vma->vm_file);
58 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59 struct dnode_of_data dn;
60 bool need_alloc = true;
61 int err = 0;
62
63 if (unlikely(IS_IMMUTABLE(inode)))
64 return VM_FAULT_SIGBUS;
65
66 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
67 return VM_FAULT_SIGBUS;
68
69 if (unlikely(f2fs_cp_error(sbi))) {
70 err = -EIO;
71 goto err;
72 }
73
74 if (!f2fs_is_checkpoint_ready(sbi)) {
75 err = -ENOSPC;
76 goto err;
77 }
78
79 err = f2fs_convert_inline_inode(inode);
80 if (err)
81 goto err;
82
83#ifdef CONFIG_F2FS_FS_COMPRESSION
84 if (f2fs_compressed_file(inode)) {
85 int ret = f2fs_is_compressed_cluster(inode, page->index);
86
87 if (ret < 0) {
88 err = ret;
89 goto err;
90 } else if (ret) {
91 need_alloc = false;
92 }
93 }
94#endif
	/* should be done outside of any locked page */
96 if (need_alloc)
97 f2fs_balance_fs(sbi, true);
98
99 sb_start_pagefault(inode->i_sb);
100
101 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
102
103 file_update_time(vmf->vma->vm_file);
104 down_read(&F2FS_I(inode)->i_mmap_sem);
105 lock_page(page);
106 if (unlikely(page->mapping != inode->i_mapping ||
107 page_offset(page) > i_size_read(inode) ||
108 !PageUptodate(page))) {
109 unlock_page(page);
110 err = -EFAULT;
111 goto out_sem;
112 }
113
114 if (need_alloc) {
		/* block allocation */
116 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
117 set_new_dnode(&dn, inode, NULL, NULL, 0);
118 err = f2fs_get_block(&dn, page->index);
119 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
120 }
121
122#ifdef CONFIG_F2FS_FS_COMPRESSION
123 if (!need_alloc) {
124 set_new_dnode(&dn, inode, NULL, NULL, 0);
125 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
126 f2fs_put_dnode(&dn);
127 }
128#endif
129 if (err) {
130 unlock_page(page);
131 goto out_sem;
132 }
133
134 f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
137 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/* check to see if the page is mapped already (no holes) */
142 if (PageMappedToDisk(page))
143 goto out_sem;

	/* page is wholly or partially inside EOF */
146 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
147 i_size_read(inode)) {
148 loff_t offset;
149
150 offset = i_size_read(inode) & ~PAGE_MASK;
151 zero_user_segment(page, offset, PAGE_SIZE);
152 }
153 set_page_dirty(page);
154 if (!PageUptodate(page))
155 SetPageUptodate(page);
156
157 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
158 f2fs_update_time(sbi, REQ_TIME);
159
160 trace_f2fs_vm_page_mkwrite(page, DATA);
161out_sem:
162 up_read(&F2FS_I(inode)->i_mmap_sem);
163
164 sb_end_pagefault(inode->i_sb);
165err:
166 return block_page_mkwrite_return(err);
167}
168
169static const struct vm_operations_struct f2fs_file_vm_ops = {
170 .fault = f2fs_filemap_fault,
171 .map_pages = filemap_map_pages,
172 .page_mkwrite = f2fs_vm_page_mkwrite,
173};
174
175static int get_parent_ino(struct inode *inode, nid_t *pino)
176{
177 struct dentry *dentry;
178
	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
183 dentry = d_find_alias(inode);
184 if (!dentry)
185 return 0;
186
187 *pino = parent_ino(dentry);
188 dput(dentry);
189 return 1;
190}
191
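/*
 * Decide whether this fsync can be served by the roll-forward (node chain)
 * path or whether a full checkpoint is required; returns the checkpoint
 * reason, or CP_NO_NEEDED if none.
 */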
192static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
193{
194 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
195 enum cp_reason_type cp_reason = CP_NO_NEEDED;
196
197 if (!S_ISREG(inode->i_mode))
198 cp_reason = CP_NON_REGULAR;
199 else if (f2fs_compressed_file(inode))
200 cp_reason = CP_COMPRESSED;
201 else if (inode->i_nlink != 1)
202 cp_reason = CP_HARDLINK;
203 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
204 cp_reason = CP_SB_NEED_CP;
205 else if (file_wrong_pino(inode))
206 cp_reason = CP_WRONG_PINO;
207 else if (!f2fs_space_for_roll_forward(sbi))
208 cp_reason = CP_NO_SPC_ROLL;
209 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
210 cp_reason = CP_NODE_NEED_CP;
211 else if (test_opt(sbi, FASTBOOT))
212 cp_reason = CP_FASTBOOT_MODE;
213 else if (F2FS_OPTION(sbi).active_logs == 2)
214 cp_reason = CP_SPEC_LOG_NUM;
215 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
216 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
217 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
218 TRANS_DIR_INO))
219 cp_reason = CP_RECOVER_DIR;
220
221 return cp_reason;
222}
223
224static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
225{
226 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
227 bool ret = false;
228
229 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
230 ret = true;
231 f2fs_put_page(i, 0);
232 return ret;
233}
234
235static void try_to_fix_pino(struct inode *inode)
236{
237 struct f2fs_inode_info *fi = F2FS_I(inode);
238 nid_t pino;
239
240 down_write(&fi->i_sem);
241 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
242 get_parent_ino(inode, &pino)) {
243 f2fs_i_pino_write(inode, pino);
244 file_got_pino(inode);
245 }
246 up_write(&fi->i_sem);
247}
248
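/*
 * Common body of fsync/fdatasync: write back file data, then either issue a
 * checkpoint (when need_do_checkpoint() reports a reason) or persist the node
 * chain used by roll-forward recovery, and finally send a cache flush unless
 * fsync_mode=nobarrier.
 */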
249static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
250 int datasync, bool atomic)
251{
252 struct inode *inode = file->f_mapping->host;
253 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
254 nid_t ino = inode->i_ino;
255 int ret = 0;
256 enum cp_reason_type cp_reason = 0;
257 struct writeback_control wbc = {
258 .sync_mode = WB_SYNC_ALL,
259 .nr_to_write = LONG_MAX,
260 .for_reclaim = 0,
261 };
262 unsigned int seq_id = 0;
263
264 if (unlikely(f2fs_readonly(inode->i_sb) ||
265 is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
266 return 0;
267
268 trace_f2fs_sync_file_enter(inode);
269
270 if (S_ISDIR(inode->i_mode))
271 goto go_write;
272
	/* if fdatasync is triggered, let's do in-place-update */
274 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
275 set_inode_flag(inode, FI_NEED_IPU);
276 ret = file_write_and_wait_range(file, start, end);
277 clear_inode_flag(inode, FI_NEED_IPU);
278
279 if (ret) {
280 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
281 return ret;
282 }
283
	/* if the inode itself is dirty, write it and take the full sync path */
285 if (!f2fs_skip_inode_update(inode, datasync)) {
286 f2fs_write_inode(inode, NULL);
287 goto go_write;
288 }

	/*
	 * if there is no written data, don't waste time writing recovery info
	 */
293 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
294 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
297 if (need_inode_page_update(sbi, ino))
298 goto go_write;
299
300 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
301 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
302 goto flush_out;
303 goto out;
304 }
305go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
310 down_read(&F2FS_I(inode)->i_sem);
311 cp_reason = need_do_checkpoint(inode);
312 up_read(&F2FS_I(inode)->i_sem);
313
314 if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
316 ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * The checkpoint above secured consistency; fixing pino here
		 * only matters for inodes fsynced after this point.
		 */
322 try_to_fix_pino(inode);
323 clear_inode_flag(inode, FI_APPEND_WRITE);
324 clear_inode_flag(inode, FI_UPDATE_WRITE);
325 goto out;
326 }
327sync_nodes:
328 atomic_inc(&sbi->wb_sync_req[NODE]);
329 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
330 atomic_dec(&sbi->wb_sync_req[NODE]);
331 if (ret)
332 goto out;
333
	/* if cp_error was enabled, we should avoid infinite loop */
335 if (unlikely(f2fs_cp_error(sbi))) {
336 ret = -EIO;
337 goto out;
338 }
339
340 if (f2fs_need_inode_block_update(sbi, ino)) {
341 f2fs_mark_inode_dirty_sync(inode, true);
342 f2fs_write_inode(inode, NULL);
343 goto sync_nodes;
344 }

	/*
	 * For atomic writes there is no need to wait for node writeback here:
	 * the serialized node chain keeps fsync ordering, and a broken chain
	 * is detected and discarded by recovery/fsck.
	 */
354 if (!atomic) {
355 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
356 if (ret)
357 goto out;
358 }
359
	/* recovery info for appended data is written; drop the tracking */
361 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
362 clear_inode_flag(inode, FI_APPEND_WRITE);
363flush_out:
364 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
365 ret = f2fs_issue_flush(sbi, inode->i_ino);
366 if (!ret) {
367 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
368 clear_inode_flag(inode, FI_UPDATE_WRITE);
369 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
370 }
371 f2fs_update_time(sbi, REQ_TIME);
372out:
373 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
374 return ret;
375}
376
377int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
378{
379 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
380 return -EIO;
381 return f2fs_do_sync_file(file, start, end, datasync, false);
382}
383
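/*
 * Return true if the block address at @index satisfies the SEEK_DATA or
 * SEEK_HOLE lookup; a NEW_ADDR (reserved) block only counts as data when the
 * corresponding page cache entry is dirty.
 */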
384static bool __found_offset(struct address_space *mapping, block_t blkaddr,
385 pgoff_t index, int whence)
386{
387 switch (whence) {
388 case SEEK_DATA:
389 if (__is_valid_data_blkaddr(blkaddr))
390 return true;
391 if (blkaddr == NEW_ADDR &&
392 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
393 return true;
394 break;
395 case SEEK_HOLE:
396 if (blkaddr == NULL_ADDR)
397 return true;
398 break;
399 }
400 return false;
401}
402
403static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
404{
405 struct inode *inode = file->f_mapping->host;
406 loff_t maxbytes = inode->i_sb->s_maxbytes;
407 struct dnode_of_data dn;
408 pgoff_t pgofs, end_offset;
409 loff_t data_ofs = offset;
410 loff_t isize;
411 int err = 0;
412
413 inode_lock(inode);
414
415 isize = i_size_read(inode);
416 if (offset >= isize)
417 goto fail;
418
	/* handle inline data case */
420 if (f2fs_has_inline_data(inode)) {
421 if (whence == SEEK_HOLE) {
422 data_ofs = isize;
423 goto found;
424 } else if (whence == SEEK_DATA) {
425 data_ofs = offset;
426 goto found;
427 }
428 }
429
430 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
431
432 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
433 set_new_dnode(&dn, inode, NULL, NULL, 0);
434 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
435 if (err && err != -ENOENT) {
436 goto fail;
437 } else if (err == -ENOENT) {
			/* direct node does not exist */
439 if (whence == SEEK_DATA) {
440 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
441 continue;
442 } else {
443 goto found;
444 }
445 }
446
447 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
448
		/* find data/hole in dnode block */
450 for (; dn.ofs_in_node < end_offset;
451 dn.ofs_in_node++, pgofs++,
452 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
453 block_t blkaddr;
454
455 blkaddr = f2fs_data_blkaddr(&dn);
456
457 if (__is_valid_data_blkaddr(blkaddr) &&
458 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
459 blkaddr, DATA_GENERIC_ENHANCE)) {
460 f2fs_put_dnode(&dn);
461 goto fail;
462 }
463
464 if (__found_offset(file->f_mapping, blkaddr,
465 pgofs, whence)) {
466 f2fs_put_dnode(&dn);
467 goto found;
468 }
469 }
470 f2fs_put_dnode(&dn);
471 }
472
473 if (whence == SEEK_DATA)
474 goto fail;
475found:
476 if (whence == SEEK_HOLE && data_ofs > isize)
477 data_ofs = isize;
478 inode_unlock(inode);
479 return vfs_setpos(file, data_ofs, maxbytes);
480fail:
481 inode_unlock(inode);
482 return -ENXIO;
483}
484
485static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
486{
487 struct inode *inode = file->f_mapping->host;
488 loff_t maxbytes = inode->i_sb->s_maxbytes;
489
490 if (f2fs_compressed_file(inode))
491 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
492
493 switch (whence) {
494 case SEEK_SET:
495 case SEEK_CUR:
496 case SEEK_END:
497 return generic_file_llseek_size(file, offset, whence,
498 maxbytes, i_size_read(inode));
499 case SEEK_DATA:
500 case SEEK_HOLE:
501 if (offset < 0)
502 return -ENXIO;
503 return f2fs_seek_block(file, offset, whence);
504 }
505
506 return -EINVAL;
507}
508
509static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
510{
511 struct inode *inode = file_inode(file);
512
513 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
514 return -EIO;
515
516 if (!f2fs_is_compress_backend_ready(inode))
517 return -EOPNOTSUPP;
518
519 file_accessed(file);
520 vma->vm_ops = &f2fs_file_vm_ops;
521 set_inode_flag(inode, FI_MMAP_FILE);
522 return 0;
523}
524
525static int f2fs_file_open(struct inode *inode, struct file *filp)
526{
527 int err = fscrypt_file_open(inode, filp);
528
529 if (err)
530 return err;
531
532 if (!f2fs_is_compress_backend_ready(inode))
533 return -EOPNOTSUPP;
534
535 err = fsverity_file_open(inode, filp);
536 if (err)
537 return err;
538
539 filp->f_mode |= FMODE_NOWAIT;
540
541 return dquot_file_open(inode, filp);
542}
543
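/*
 * Invalidate @count block addresses starting at dn->ofs_in_node: release the
 * blocks, keep compressed-cluster block accounting in sync, and update the
 * extent cache and valid block count.
 */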
544void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
545{
546 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
547 struct f2fs_node *raw_node;
548 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
549 __le32 *addr;
550 int base = 0;
551 bool compressed_cluster = false;
552 int cluster_index = 0, valid_blocks = 0;
553 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
554 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
555
556 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
557 base = get_extra_isize(dn->inode);
558
559 raw_node = F2FS_NODE(dn->node_page);
560 addr = blkaddr_in_node(raw_node) + base + ofs;
561
562
563 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
564 block_t blkaddr = le32_to_cpu(*addr);
565
566 if (f2fs_compressed_file(dn->inode) &&
567 !(cluster_index & (cluster_size - 1))) {
568 if (compressed_cluster)
569 f2fs_i_compr_blocks_update(dn->inode,
570 valid_blocks, false);
571 compressed_cluster = (blkaddr == COMPRESS_ADDR);
572 valid_blocks = 0;
573 }
574
575 if (blkaddr == NULL_ADDR)
576 continue;
577
578 dn->data_blkaddr = NULL_ADDR;
579 f2fs_set_data_blkaddr(dn);
580
581 if (__is_valid_data_blkaddr(blkaddr)) {
582 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
583 DATA_GENERIC_ENHANCE))
584 continue;
585 if (compressed_cluster)
586 valid_blocks++;
587 }
588
589 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
590 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
591
592 f2fs_invalidate_blocks(sbi, blkaddr);
593
594 if (!released || blkaddr != COMPRESS_ADDR)
595 nr_free++;
596 }
597
598 if (compressed_cluster)
599 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
600
601 if (nr_free) {
602 pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
607 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
608 dn->inode) + ofs;
609 f2fs_update_extent_cache_range(dn, fofs, 0, len);
610 dec_valid_block_count(sbi, dn->inode, nr_free);
611 }
612 dn->ofs_in_node = ofs;
613
614 f2fs_update_time(sbi, REQ_TIME);
615 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
616 dn->ofs_in_node, nr_free);
617}
618
619void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
620{
621 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
622}
623
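/*
 * Zero the tail of the page that straddles the new EOF.  With @cache_only set,
 * only an uptodate page already in the page cache is touched; nothing is read
 * from disk.
 */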
624static int truncate_partial_data_page(struct inode *inode, u64 from,
625 bool cache_only)
626{
627 loff_t offset = from & (PAGE_SIZE - 1);
628 pgoff_t index = from >> PAGE_SHIFT;
629 struct address_space *mapping = inode->i_mapping;
630 struct page *page;
631
632 if (!offset && !cache_only)
633 return 0;
634
635 if (cache_only) {
636 page = find_lock_page(mapping, index);
637 if (page && PageUptodate(page))
638 goto truncate_out;
639 f2fs_put_page(page, 1);
640 return 0;
641 }
642
643 page = f2fs_get_lock_data_page(inode, index, true);
644 if (IS_ERR(page))
645 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
646truncate_out:
647 f2fs_wait_on_page_writeback(page, DATA, true, true);
648 zero_user(page, offset, PAGE_SIZE - offset);
649
	/* An encrypted inode should have a key and truncate the last page. */
651 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
652 if (!cache_only)
653 set_page_dirty(page);
654 f2fs_put_page(page, 1);
655 return 0;
656}
657
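/*
 * Free all blocks beyond @from: truncate inline data or the addresses in the
 * inode/dnode block under lock_op, drop the remaining node blocks, and zero
 * the partial page at the new EOF.
 */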
658int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
659{
660 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
661 struct dnode_of_data dn;
662 pgoff_t free_from;
663 int count = 0, err = 0;
664 struct page *ipage;
665 bool truncate_page = false;
666
667 trace_f2fs_truncate_blocks_enter(inode, from);
668
669 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
670
671 if (free_from >= max_file_blocks(inode))
672 goto free_partial;
673
674 if (lock)
675 f2fs_lock_op(sbi);
676
677 ipage = f2fs_get_node_page(sbi, inode->i_ino);
678 if (IS_ERR(ipage)) {
679 err = PTR_ERR(ipage);
680 goto out;
681 }
682
683 if (f2fs_has_inline_data(inode)) {
684 f2fs_truncate_inline_inode(inode, ipage, from);
685 f2fs_put_page(ipage, 1);
686 truncate_page = true;
687 goto out;
688 }
689
690 set_new_dnode(&dn, inode, ipage, NULL, 0);
691 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
692 if (err) {
693 if (err == -ENOENT)
694 goto free_next;
695 goto out;
696 }
697
698 count = ADDRS_PER_PAGE(dn.node_page, inode);
699
700 count -= dn.ofs_in_node;
701 f2fs_bug_on(sbi, count < 0);
702
703 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
704 f2fs_truncate_data_blocks_range(&dn, count);
705 free_from += count;
706 }
707
708 f2fs_put_dnode(&dn);
709free_next:
710 err = f2fs_truncate_inode_blocks(inode, free_from);
711out:
712 if (lock)
713 f2fs_unlock_op(sbi);
714free_partial:
	/* lastly zero out the partial page crossing the new EOF */
716 if (!err)
717 err = truncate_partial_data_page(inode, from, truncate_page);
718
719 trace_f2fs_truncate_blocks_exit(inode, err);
720 return err;
721}
722
723int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
724{
725 u64 free_from = from;
726 int err;
727
728#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only truncation that is aligned to the
	 * cluster size is supported here
	 */
733 if (f2fs_compressed_file(inode))
734 free_from = round_up(from,
735 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
736#endif
737
738 err = f2fs_do_truncate_blocks(inode, free_from, lock);
739 if (err)
740 return err;
741
742#ifdef CONFIG_F2FS_FS_COMPRESSION
743 if (from != free_from) {
744 err = f2fs_truncate_partial_cluster(inode, from, lock);
745 if (err)
746 return err;
747 }
748#endif
749
750 return 0;
751}
752
753int f2fs_truncate(struct inode *inode)
754{
755 int err;
756
757 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
758 return -EIO;
759
760 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
761 S_ISLNK(inode->i_mode)))
762 return 0;
763
764 trace_f2fs_truncate(inode);
765
766 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
767 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
768 return -EIO;
769 }
770
771 err = dquot_initialize(inode);
772 if (err)
773 return err;
774
	/* convert inline data if it no longer qualifies for inline storage */
776 if (!f2fs_may_inline_data(inode)) {
777 err = f2fs_convert_inline_inode(inode);
778 if (err)
779 return err;
780 }
781
782 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
783 if (err)
784 return err;
785
786 inode->i_mtime = inode->i_ctime = current_time(inode);
787 f2fs_mark_inode_dirty_sync(inode, false);
788 return 0;
789}
790
791int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
792 struct kstat *stat, u32 request_mask, unsigned int query_flags)
793{
794 struct inode *inode = d_inode(path->dentry);
795 struct f2fs_inode_info *fi = F2FS_I(inode);
796 struct f2fs_inode *ri;
797 unsigned int flags;
798
799 if (f2fs_has_extra_attr(inode) &&
800 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
801 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
802 stat->result_mask |= STATX_BTIME;
803 stat->btime.tv_sec = fi->i_crtime.tv_sec;
804 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
805 }
806
807 flags = fi->i_flags;
808 if (flags & F2FS_COMPR_FL)
809 stat->attributes |= STATX_ATTR_COMPRESSED;
810 if (flags & F2FS_APPEND_FL)
811 stat->attributes |= STATX_ATTR_APPEND;
812 if (IS_ENCRYPTED(inode))
813 stat->attributes |= STATX_ATTR_ENCRYPTED;
814 if (flags & F2FS_IMMUTABLE_FL)
815 stat->attributes |= STATX_ATTR_IMMUTABLE;
816 if (flags & F2FS_NODUMP_FL)
817 stat->attributes |= STATX_ATTR_NODUMP;
818 if (IS_VERITY(inode))
819 stat->attributes |= STATX_ATTR_VERITY;
820
821 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
822 STATX_ATTR_APPEND |
823 STATX_ATTR_ENCRYPTED |
824 STATX_ATTR_IMMUTABLE |
825 STATX_ATTR_NODUMP |
826 STATX_ATTR_VERITY);
827
828 generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
831 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
832 f2fs_has_inline_dentry(inode))
833 stat->blocks += (stat->size + 511) >> 9;
834
835 return 0;
836}
837
838#ifdef CONFIG_F2FS_FS_POSIX_ACL
839static void __setattr_copy(struct user_namespace *mnt_userns,
840 struct inode *inode, const struct iattr *attr)
841{
842 unsigned int ia_valid = attr->ia_valid;
843
844 if (ia_valid & ATTR_UID)
845 inode->i_uid = attr->ia_uid;
846 if (ia_valid & ATTR_GID)
847 inode->i_gid = attr->ia_gid;
848 if (ia_valid & ATTR_ATIME)
849 inode->i_atime = attr->ia_atime;
850 if (ia_valid & ATTR_MTIME)
851 inode->i_mtime = attr->ia_mtime;
852 if (ia_valid & ATTR_CTIME)
853 inode->i_ctime = attr->ia_ctime;
854 if (ia_valid & ATTR_MODE) {
855 umode_t mode = attr->ia_mode;
856 kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
857
858 if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
859 mode &= ~S_ISGID;
860 set_acl_inode(inode, mode);
861 }
862}
863#else
864#define __setattr_copy setattr_copy
865#endif
866
867int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
868 struct iattr *attr)
869{
870 struct inode *inode = d_inode(dentry);
871 int err;
872
873 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
874 return -EIO;
875
876 if (unlikely(IS_IMMUTABLE(inode)))
877 return -EPERM;
878
879 if (unlikely(IS_APPEND(inode) &&
880 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
881 ATTR_GID | ATTR_TIMES_SET))))
882 return -EPERM;
883
884 if ((attr->ia_valid & ATTR_SIZE) &&
885 !f2fs_is_compress_backend_ready(inode))
886 return -EOPNOTSUPP;
887
888 err = setattr_prepare(&init_user_ns, dentry, attr);
889 if (err)
890 return err;
891
892 err = fscrypt_prepare_setattr(dentry, attr);
893 if (err)
894 return err;
895
896 err = fsverity_prepare_setattr(dentry, attr);
897 if (err)
898 return err;
899
900 if (is_quota_modification(inode, attr)) {
901 err = dquot_initialize(inode);
902 if (err)
903 return err;
904 }
905 if ((attr->ia_valid & ATTR_UID &&
906 !uid_eq(attr->ia_uid, inode->i_uid)) ||
907 (attr->ia_valid & ATTR_GID &&
908 !gid_eq(attr->ia_gid, inode->i_gid))) {
909 f2fs_lock_op(F2FS_I_SB(inode));
910 err = dquot_transfer(inode, attr);
911 if (err) {
912 set_sbi_flag(F2FS_I_SB(inode),
913 SBI_QUOTA_NEED_REPAIR);
914 f2fs_unlock_op(F2FS_I_SB(inode));
915 return err;
916 }
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
921 if (attr->ia_valid & ATTR_UID)
922 inode->i_uid = attr->ia_uid;
923 if (attr->ia_valid & ATTR_GID)
924 inode->i_gid = attr->ia_gid;
925 f2fs_mark_inode_dirty_sync(inode, true);
926 f2fs_unlock_op(F2FS_I_SB(inode));
927 }
928
929 if (attr->ia_valid & ATTR_SIZE) {
930 loff_t old_size = i_size_read(inode);
931
932 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
937 err = f2fs_convert_inline_inode(inode);
938 if (err)
939 return err;
940 }
941
942 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
943 down_write(&F2FS_I(inode)->i_mmap_sem);
944
945 truncate_setsize(inode, attr->ia_size);
946
947 if (attr->ia_size <= old_size)
948 err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
953 up_write(&F2FS_I(inode)->i_mmap_sem);
954 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
955 if (err)
956 return err;
957
958 spin_lock(&F2FS_I(inode)->i_size_lock);
959 inode->i_mtime = inode->i_ctime = current_time(inode);
960 F2FS_I(inode)->last_disk_size = i_size_read(inode);
961 spin_unlock(&F2FS_I(inode)->i_size_lock);
962 }
963
964 __setattr_copy(&init_user_ns, inode, attr);
965
966 if (attr->ia_valid & ATTR_MODE) {
967 err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));
968
969 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
970 if (!err)
971 inode->i_mode = F2FS_I(inode)->i_acl_mode;
972 clear_inode_flag(inode, FI_ACL_MODE);
973 }
974 }
975
	/* file size may have changed here */
977 f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
980 f2fs_balance_fs(F2FS_I_SB(inode), true);
981
982 return err;
983}
984
985const struct inode_operations f2fs_file_inode_operations = {
986 .getattr = f2fs_getattr,
987 .setattr = f2fs_setattr,
988 .get_acl = f2fs_get_acl,
989 .set_acl = f2fs_set_acl,
990 .listxattr = f2fs_listxattr,
991 .fiemap = f2fs_fiemap,
992 .fileattr_get = f2fs_fileattr_get,
993 .fileattr_set = f2fs_fileattr_set,
994};
995
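/*
 * Zero @len bytes starting at @start inside page @index, allocating a new data
 * page if necessary, so a punched or zeroed sub-page range reads back as
 * zeroes.
 */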
996static int fill_zero(struct inode *inode, pgoff_t index,
997 loff_t start, loff_t len)
998{
999 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1000 struct page *page;
1001
1002 if (!len)
1003 return 0;
1004
1005 f2fs_balance_fs(sbi, true);
1006
1007 f2fs_lock_op(sbi);
1008 page = f2fs_get_new_data_page(inode, NULL, index, false);
1009 f2fs_unlock_op(sbi);
1010
1011 if (IS_ERR(page))
1012 return PTR_ERR(page);
1013
1014 f2fs_wait_on_page_writeback(page, DATA, true, true);
1015 zero_user(page, start, len);
1016 set_page_dirty(page);
1017 f2fs_put_page(page, 1);
1018 return 0;
1019}
1020
1021int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1022{
1023 int err;
1024
1025 while (pg_start < pg_end) {
1026 struct dnode_of_data dn;
1027 pgoff_t end_offset, count;
1028
1029 set_new_dnode(&dn, inode, NULL, NULL, 0);
1030 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1031 if (err) {
1032 if (err == -ENOENT) {
1033 pg_start = f2fs_get_next_page_offset(&dn,
1034 pg_start);
1035 continue;
1036 }
1037 return err;
1038 }
1039
1040 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1041 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1042
1043 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1044
1045 f2fs_truncate_data_blocks_range(&dn, count);
1046 f2fs_put_dnode(&dn);
1047
1048 pg_start += count;
1049 }
1050 return 0;
1051}
1052
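/*
 * FALLOC_FL_PUNCH_HOLE helper: zero the partial pages at both ends of the
 * range and free the fully covered blocks in between under the gc and mmap
 * locks.
 */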
1053static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1054{
1055 pgoff_t pg_start, pg_end;
1056 loff_t off_start, off_end;
1057 int ret;
1058
1059 ret = f2fs_convert_inline_inode(inode);
1060 if (ret)
1061 return ret;
1062
1063 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1064 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1065
1066 off_start = offset & (PAGE_SIZE - 1);
1067 off_end = (offset + len) & (PAGE_SIZE - 1);
1068
1069 if (pg_start == pg_end) {
1070 ret = fill_zero(inode, pg_start, off_start,
1071 off_end - off_start);
1072 if (ret)
1073 return ret;
1074 } else {
1075 if (off_start) {
1076 ret = fill_zero(inode, pg_start++, off_start,
1077 PAGE_SIZE - off_start);
1078 if (ret)
1079 return ret;
1080 }
1081 if (off_end) {
1082 ret = fill_zero(inode, pg_end, 0, off_end);
1083 if (ret)
1084 return ret;
1085 }
1086
1087 if (pg_start < pg_end) {
1088 struct address_space *mapping = inode->i_mapping;
1089 loff_t blk_start, blk_end;
1090 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1091
1092 f2fs_balance_fs(sbi, true);
1093
1094 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1095 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1096
1097 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1098 down_write(&F2FS_I(inode)->i_mmap_sem);
1099
1100 truncate_inode_pages_range(mapping, blk_start,
1101 blk_end - 1);
1102
1103 f2fs_lock_op(sbi);
1104 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1105 f2fs_unlock_op(sbi);
1106
1107 up_write(&F2FS_I(inode)->i_mmap_sem);
1108 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1109 }
1110 }
1111
1112 return ret;
1113}
1114
1115static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1116 int *do_replace, pgoff_t off, pgoff_t len)
1117{
1118 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1119 struct dnode_of_data dn;
1120 int ret, done, i;
1121
1122next_dnode:
1123 set_new_dnode(&dn, inode, NULL, NULL, 0);
1124 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1125 if (ret && ret != -ENOENT) {
1126 return ret;
1127 } else if (ret == -ENOENT) {
1128 if (dn.max_level == 0)
1129 return -ENOENT;
1130 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1131 dn.ofs_in_node, len);
1132 blkaddr += done;
1133 do_replace += done;
1134 goto next;
1135 }
1136
1137 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1138 dn.ofs_in_node, len);
1139 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1140 *blkaddr = f2fs_data_blkaddr(&dn);
1141
1142 if (__is_valid_data_blkaddr(*blkaddr) &&
1143 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1144 DATA_GENERIC_ENHANCE)) {
1145 f2fs_put_dnode(&dn);
1146 return -EFSCORRUPTED;
1147 }
1148
1149 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1150
1151 if (f2fs_lfs_mode(sbi)) {
1152 f2fs_put_dnode(&dn);
1153 return -EOPNOTSUPP;
1154 }
1155
			/* do not invalidate this block address */
1157 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1158 *do_replace = 1;
1159 }
1160 }
1161 f2fs_put_dnode(&dn);
1162next:
1163 len -= done;
1164 off += done;
1165 if (len)
1166 goto next_dnode;
1167 return 0;
1168}
1169
1170static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1171 int *do_replace, pgoff_t off, int len)
1172{
1173 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1174 struct dnode_of_data dn;
1175 int ret, i;
1176
1177 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1178 if (*do_replace == 0)
1179 continue;
1180
1181 set_new_dnode(&dn, inode, NULL, NULL, 0);
1182 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1183 if (ret) {
1184 dec_valid_block_count(sbi, inode, 1);
1185 f2fs_invalidate_blocks(sbi, *blkaddr);
1186 } else {
1187 f2fs_update_data_blkaddr(&dn, *blkaddr);
1188 }
1189 f2fs_put_dnode(&dn);
1190 }
1191 return 0;
1192}
1193
1194static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1195 block_t *blkaddr, int *do_replace,
1196 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1197{
1198 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1199 pgoff_t i = 0;
1200 int ret;
1201
1202 while (i < len) {
1203 if (blkaddr[i] == NULL_ADDR && !full) {
1204 i++;
1205 continue;
1206 }
1207
1208 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1209 struct dnode_of_data dn;
1210 struct node_info ni;
1211 size_t new_size;
1212 pgoff_t ilen;
1213
1214 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1215 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1216 if (ret)
1217 return ret;
1218
1219 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1220 if (ret) {
1221 f2fs_put_dnode(&dn);
1222 return ret;
1223 }
1224
1225 ilen = min((pgoff_t)
1226 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1227 dn.ofs_in_node, len - i);
1228 do {
1229 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1230 f2fs_truncate_data_blocks_range(&dn, 1);
1231
1232 if (do_replace[i]) {
1233 f2fs_i_blocks_write(src_inode,
1234 1, false, false);
1235 f2fs_i_blocks_write(dst_inode,
1236 1, true, false);
1237 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1238 blkaddr[i], ni.version, true, false);
1239
1240 do_replace[i] = 0;
1241 }
1242 dn.ofs_in_node++;
1243 i++;
1244 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1245 if (dst_inode->i_size < new_size)
1246 f2fs_i_size_write(dst_inode, new_size);
1247 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1248
1249 f2fs_put_dnode(&dn);
1250 } else {
1251 struct page *psrc, *pdst;
1252
1253 psrc = f2fs_get_lock_data_page(src_inode,
1254 src + i, true);
1255 if (IS_ERR(psrc))
1256 return PTR_ERR(psrc);
1257 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1258 true);
1259 if (IS_ERR(pdst)) {
1260 f2fs_put_page(psrc, 1);
1261 return PTR_ERR(pdst);
1262 }
1263 f2fs_copy_page(psrc, pdst);
1264 set_page_dirty(pdst);
1265 f2fs_put_page(pdst, 1);
1266 f2fs_put_page(psrc, 1);
1267
1268 ret = f2fs_truncate_hole(src_inode,
1269 src + i, src + i + 1);
1270 if (ret)
1271 return ret;
1272 i++;
1273 }
1274 }
1275 return 0;
1276}
1277
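/*
 * Move up to @len blocks from @src of src_inode to @dst of dst_inode in
 * bounded chunks; if cloning fails, the block addresses already read out are
 * rolled back.
 */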
1278static int __exchange_data_block(struct inode *src_inode,
1279 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1280 pgoff_t len, bool full)
1281{
1282 block_t *src_blkaddr;
1283 int *do_replace;
1284 pgoff_t olen;
1285 int ret;
1286
1287 while (len) {
1288 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1289
1290 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1291 array_size(olen, sizeof(block_t)),
1292 GFP_NOFS);
1293 if (!src_blkaddr)
1294 return -ENOMEM;
1295
1296 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1297 array_size(olen, sizeof(int)),
1298 GFP_NOFS);
1299 if (!do_replace) {
1300 kvfree(src_blkaddr);
1301 return -ENOMEM;
1302 }
1303
1304 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1305 do_replace, src, olen);
1306 if (ret)
1307 goto roll_back;
1308
1309 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1310 do_replace, src, dst, olen, full);
1311 if (ret)
1312 goto roll_back;
1313
1314 src += olen;
1315 dst += olen;
1316 len -= olen;
1317
1318 kvfree(src_blkaddr);
1319 kvfree(do_replace);
1320 }
1321 return 0;
1322
1323roll_back:
1324 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1325 kvfree(src_blkaddr);
1326 kvfree(do_replace);
1327 return ret;
1328}
1329
1330static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1331{
1332 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1333 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1334 pgoff_t start = offset >> PAGE_SHIFT;
1335 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1336 int ret;
1337
1338 f2fs_balance_fs(sbi, true);
1339
	/* avoid gc operation during block exchange */
1341 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1342 down_write(&F2FS_I(inode)->i_mmap_sem);
1343
1344 f2fs_lock_op(sbi);
1345 f2fs_drop_extent_tree(inode);
1346 truncate_pagecache(inode, offset);
1347 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1348 f2fs_unlock_op(sbi);
1349
1350 up_write(&F2FS_I(inode)->i_mmap_sem);
1351 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1352 return ret;
1353}
1354
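/*
 * FALLOC_FL_COLLAPSE_RANGE: remove a block-aligned range and shift the blocks
 * that follow it downwards, shrinking i_size by @len.
 */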
1355static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1356{
1357 loff_t new_size;
1358 int ret;
1359
1360 if (offset + len >= i_size_read(inode))
1361 return -EINVAL;
1362
	/* collapse range should be aligned to block size of f2fs */
1364 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1365 return -EINVAL;
1366
1367 ret = f2fs_convert_inline_inode(inode);
1368 if (ret)
1369 return ret;
1370
	/* write out all dirty pages from offset */
1372 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1373 if (ret)
1374 return ret;
1375
1376 ret = f2fs_do_collapse(inode, offset, len);
1377 if (ret)
1378 return ret;
1379
	/* write out all moved pages, if possible */
1381 down_write(&F2FS_I(inode)->i_mmap_sem);
1382 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1383 truncate_pagecache(inode, offset);
1384
1385 new_size = i_size_read(inode) - len;
1386 ret = f2fs_truncate_blocks(inode, new_size, true);
1387 up_write(&F2FS_I(inode)->i_mmap_sem);
1388 if (!ret)
1389 f2fs_i_size_write(inode, new_size);
1390 return ret;
1391}
1392
1393static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1394 pgoff_t end)
1395{
1396 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1397 pgoff_t index = start;
1398 unsigned int ofs_in_node = dn->ofs_in_node;
1399 blkcnt_t count = 0;
1400 int ret;
1401
1402 for (; index < end; index++, dn->ofs_in_node++) {
1403 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1404 count++;
1405 }
1406
1407 dn->ofs_in_node = ofs_in_node;
1408 ret = f2fs_reserve_new_blocks(dn, count);
1409 if (ret)
1410 return ret;
1411
1412 dn->ofs_in_node = ofs_in_node;
1413 for (index = start; index < end; index++, dn->ofs_in_node++) {
1414 dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
1419 if (dn->data_blkaddr == NULL_ADDR) {
1420 ret = -ENOSPC;
1421 break;
1422 }
1423 if (dn->data_blkaddr != NEW_ADDR) {
1424 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1425 dn->data_blkaddr = NEW_ADDR;
1426 f2fs_set_data_blkaddr(dn);
1427 }
1428 }
1429
1430 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1431
1432 return ret;
1433}
1434
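/*
 * FALLOC_FL_ZERO_RANGE: zero the partial pages at the edges of the range and
 * convert the fully covered blocks in between to reserved (NEW_ADDR) blocks.
 */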
1435static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1436 int mode)
1437{
1438 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1439 struct address_space *mapping = inode->i_mapping;
1440 pgoff_t index, pg_start, pg_end;
1441 loff_t new_size = i_size_read(inode);
1442 loff_t off_start, off_end;
1443 int ret = 0;
1444
1445 ret = inode_newsize_ok(inode, (len + offset));
1446 if (ret)
1447 return ret;
1448
1449 ret = f2fs_convert_inline_inode(inode);
1450 if (ret)
1451 return ret;
1452
1453 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1454 if (ret)
1455 return ret;
1456
1457 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1458 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1459
1460 off_start = offset & (PAGE_SIZE - 1);
1461 off_end = (offset + len) & (PAGE_SIZE - 1);
1462
1463 if (pg_start == pg_end) {
1464 ret = fill_zero(inode, pg_start, off_start,
1465 off_end - off_start);
1466 if (ret)
1467 return ret;
1468
1469 new_size = max_t(loff_t, new_size, offset + len);
1470 } else {
1471 if (off_start) {
1472 ret = fill_zero(inode, pg_start++, off_start,
1473 PAGE_SIZE - off_start);
1474 if (ret)
1475 return ret;
1476
1477 new_size = max_t(loff_t, new_size,
1478 (loff_t)pg_start << PAGE_SHIFT);
1479 }
1480
1481 for (index = pg_start; index < pg_end;) {
1482 struct dnode_of_data dn;
1483 unsigned int end_offset;
1484 pgoff_t end;
1485
1486 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1487 down_write(&F2FS_I(inode)->i_mmap_sem);
1488
1489 truncate_pagecache_range(inode,
1490 (loff_t)index << PAGE_SHIFT,
1491 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1492
1493 f2fs_lock_op(sbi);
1494
1495 set_new_dnode(&dn, inode, NULL, NULL, 0);
1496 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1497 if (ret) {
1498 f2fs_unlock_op(sbi);
1499 up_write(&F2FS_I(inode)->i_mmap_sem);
1500 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1501 goto out;
1502 }
1503
1504 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1505 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1506
1507 ret = f2fs_do_zero_range(&dn, index, end);
1508 f2fs_put_dnode(&dn);
1509
1510 f2fs_unlock_op(sbi);
1511 up_write(&F2FS_I(inode)->i_mmap_sem);
1512 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1513
1514 f2fs_balance_fs(sbi, dn.node_changed);
1515
1516 if (ret)
1517 goto out;
1518
1519 index = end;
1520 new_size = max_t(loff_t, new_size,
1521 (loff_t)index << PAGE_SHIFT);
1522 }
1523
1524 if (off_end) {
1525 ret = fill_zero(inode, pg_end, 0, off_end);
1526 if (ret)
1527 goto out;
1528
1529 new_size = max_t(loff_t, new_size, offset + len);
1530 }
1531 }
1532
1533out:
1534 if (new_size > i_size_read(inode)) {
1535 if (mode & FALLOC_FL_KEEP_SIZE)
1536 file_set_keep_isize(inode);
1537 else
1538 f2fs_i_size_write(inode, new_size);
1539 }
1540 return ret;
1541}
1542
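/*
 * FALLOC_FL_INSERT_RANGE: open a block-aligned hole at @offset by shifting the
 * existing blocks upwards, working backwards from the end of file.
 */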
1543static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1544{
1545 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1546 pgoff_t nr, pg_start, pg_end, delta, idx;
1547 loff_t new_size;
1548 int ret = 0;
1549
1550 new_size = i_size_read(inode) + len;
1551 ret = inode_newsize_ok(inode, new_size);
1552 if (ret)
1553 return ret;
1554
1555 if (offset >= i_size_read(inode))
1556 return -EINVAL;
1557
	/* insert range should be aligned to block size of f2fs */
1559 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1560 return -EINVAL;
1561
1562 ret = f2fs_convert_inline_inode(inode);
1563 if (ret)
1564 return ret;
1565
1566 f2fs_balance_fs(sbi, true);
1567
1568 down_write(&F2FS_I(inode)->i_mmap_sem);
1569 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1570 up_write(&F2FS_I(inode)->i_mmap_sem);
1571 if (ret)
1572 return ret;
1573
	/* write out all dirty pages from offset */
1575 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1576 if (ret)
1577 return ret;
1578
1579 pg_start = offset >> PAGE_SHIFT;
1580 pg_end = (offset + len) >> PAGE_SHIFT;
1581 delta = pg_end - pg_start;
1582 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1583
	/* avoid gc operation during block exchange */
1585 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1586 down_write(&F2FS_I(inode)->i_mmap_sem);
1587 truncate_pagecache(inode, offset);
1588
1589 while (!ret && idx > pg_start) {
1590 nr = idx - pg_start;
1591 if (nr > delta)
1592 nr = delta;
1593 idx -= nr;
1594
1595 f2fs_lock_op(sbi);
1596 f2fs_drop_extent_tree(inode);
1597
1598 ret = __exchange_data_block(inode, inode, idx,
1599 idx + delta, nr, false);
1600 f2fs_unlock_op(sbi);
1601 }
1602 up_write(&F2FS_I(inode)->i_mmap_sem);
1603 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1604
	/* write out all moved pages, if possible */
1606 down_write(&F2FS_I(inode)->i_mmap_sem);
1607 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1608 truncate_pagecache(inode, offset);
1609 up_write(&F2FS_I(inode)->i_mmap_sem);
1610
1611 if (!ret)
1612 f2fs_i_size_write(inode, new_size);
1613 return ret;
1614}
1615
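/*
 * Preallocate blocks for plain fallocate() (no punch/collapse/zero/insert
 * mode); pinned files are allocated one section at a time from the pinned
 * cold-data log.
 */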
1616static int expand_inode_data(struct inode *inode, loff_t offset,
1617 loff_t len, int mode)
1618{
1619 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1620 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1621 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1622 .m_may_create = true };
1623 pgoff_t pg_start, pg_end;
1624 loff_t new_size = i_size_read(inode);
1625 loff_t off_end;
1626 block_t expanded = 0;
1627 int err;
1628
1629 err = inode_newsize_ok(inode, (len + offset));
1630 if (err)
1631 return err;
1632
1633 err = f2fs_convert_inline_inode(inode);
1634 if (err)
1635 return err;
1636
1637 f2fs_balance_fs(sbi, true);
1638
1639 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1640 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1641 off_end = (offset + len) & (PAGE_SIZE - 1);
1642
1643 map.m_lblk = pg_start;
1644 map.m_len = pg_end - pg_start;
1645 if (off_end)
1646 map.m_len++;
1647
1648 if (!map.m_len)
1649 return 0;
1650
1651 if (f2fs_is_pinned_file(inode)) {
1652 block_t sec_blks = BLKS_PER_SEC(sbi);
1653 block_t sec_len = roundup(map.m_len, sec_blks);
1654
1655 map.m_len = sec_blks;
1656next_alloc:
1657 if (has_not_enough_free_secs(sbi, 0,
1658 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1659 down_write(&sbi->gc_lock);
1660 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1661 if (err && err != -ENODATA && err != -EAGAIN)
1662 goto out_err;
1663 }
1664
1665 down_write(&sbi->pin_sem);
1666
1667 f2fs_lock_op(sbi);
1668 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1669 f2fs_unlock_op(sbi);
1670
1671 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1672 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1673
1674 up_write(&sbi->pin_sem);
1675
1676 expanded += map.m_len;
1677 sec_len -= map.m_len;
1678 map.m_lblk += map.m_len;
1679 if (!err && sec_len)
1680 goto next_alloc;
1681
1682 map.m_len = expanded;
1683 } else {
1684 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1685 expanded = map.m_len;
1686 }
1687out_err:
1688 if (err) {
1689 pgoff_t last_off;
1690
1691 if (!expanded)
1692 return err;
1693
1694 last_off = pg_start + expanded - 1;
1695
		/* update new size to the failed position */
1697 new_size = (last_off == pg_end) ? offset + len :
1698 (loff_t)(last_off + 1) << PAGE_SHIFT;
1699 } else {
1700 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1701 }
1702
1703 if (new_size > i_size_read(inode)) {
1704 if (mode & FALLOC_FL_KEEP_SIZE)
1705 file_set_keep_isize(inode);
1706 else
1707 f2fs_i_size_write(inode, new_size);
1708 }
1709
1710 return err;
1711}
1712
1713static long f2fs_fallocate(struct file *file, int mode,
1714 loff_t offset, loff_t len)
1715{
1716 struct inode *inode = file_inode(file);
1717 long ret = 0;
1718
1719 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1720 return -EIO;
1721 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1722 return -ENOSPC;
1723 if (!f2fs_is_compress_backend_ready(inode))
1724 return -EOPNOTSUPP;
1725
	/* f2fs only supports fallocate() on regular files */
1727 if (!S_ISREG(inode->i_mode))
1728 return -EINVAL;
1729
1730 if (IS_ENCRYPTED(inode) &&
1731 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1732 return -EOPNOTSUPP;
1733
1734 if (f2fs_compressed_file(inode) &&
1735 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1736 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1737 return -EOPNOTSUPP;
1738
1739 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1740 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1741 FALLOC_FL_INSERT_RANGE))
1742 return -EOPNOTSUPP;
1743
1744 inode_lock(inode);
1745
1746 if (mode & FALLOC_FL_PUNCH_HOLE) {
1747 if (offset >= inode->i_size)
1748 goto out;
1749
1750 ret = punch_hole(inode, offset, len);
1751 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1752 ret = f2fs_collapse_range(inode, offset, len);
1753 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1754 ret = f2fs_zero_range(inode, offset, len, mode);
1755 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1756 ret = f2fs_insert_range(inode, offset, len);
1757 } else {
1758 ret = expand_inode_data(inode, offset, len, mode);
1759 }
1760
1761 if (!ret) {
1762 inode->i_mtime = inode->i_ctime = current_time(inode);
1763 f2fs_mark_inode_dirty_sync(inode, false);
1764 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1765 }
1766
1767out:
1768 inode_unlock(inode);
1769
1770 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1771 return ret;
1772}
1773
1774static int f2fs_release_file(struct inode *inode, struct file *filp)
1775{
	/*
	 * f2fs_release_file is called at every close; don't drop in-memory
	 * (atomic/volatile) pages when the close comes from a process that is
	 * not the last writer.
	 */
1780 if (!(filp->f_mode & FMODE_WRITE) ||
1781 atomic_read(&inode->i_writecount) != 1)
1782 return 0;
1783
	/* drop any staged atomic pages and flush volatile data */
1785 if (f2fs_is_atomic_file(inode))
1786 f2fs_drop_inmem_pages(inode);
1787 if (f2fs_is_volatile_file(inode)) {
1788 set_inode_flag(inode, FI_DROP_CACHE);
1789 filemap_fdatawrite(inode->i_mapping);
1790 clear_inode_flag(inode, FI_DROP_CACHE);
1791 clear_inode_flag(inode, FI_VOLATILE_FILE);
1792 stat_dec_volatile_write(inode);
1793 }
1794 return 0;
1795}
1796
1797static int f2fs_file_flush(struct file *file, fl_owner_t id)
1798{
1799 struct inode *inode = file_inode(file);
1800
	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other reader/write can see corrupted database
	 * until all the writers close its file. Since this should be done
	 * before dropping file lock, it needs to do in ->flush.
	 */
1807 if (f2fs_is_atomic_file(inode) &&
1808 F2FS_I(inode)->inmem_task == current)
1809 f2fs_drop_inmem_pages(inode);
1810 return 0;
1811}
1812
1813static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1814{
1815 struct f2fs_inode_info *fi = F2FS_I(inode);
1816 u32 masked_flags = fi->i_flags & mask;
1817
1818
1819 iflags &= mask;

	/* Is it quota file? Do not allow user to mess with it */
1822 if (IS_NOQUOTA(inode))
1823 return -EPERM;
1824
1825 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1826 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1827 return -EOPNOTSUPP;
1828 if (!f2fs_empty_dir(inode))
1829 return -ENOTEMPTY;
1830 }
1831
1832 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1833 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1834 return -EOPNOTSUPP;
1835 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1836 return -EINVAL;
1837 }
1838
1839 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1840 if (masked_flags & F2FS_COMPR_FL) {
1841 if (!f2fs_disable_compressed_file(inode))
1842 return -EINVAL;
1843 }
1844 if (iflags & F2FS_NOCOMP_FL)
1845 return -EINVAL;
1846 if (iflags & F2FS_COMPR_FL) {
1847 if (!f2fs_may_compress(inode))
1848 return -EINVAL;
1849 if (S_ISREG(inode->i_mode) && inode->i_size)
1850 return -EINVAL;
1851
1852 set_compress_context(inode);
1853 }
1854 }
1855 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1856 if (masked_flags & F2FS_COMPR_FL)
1857 return -EINVAL;
1858 }
1859
1860 fi->i_flags = iflags | (fi->i_flags & ~mask);
1861 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1862 (fi->i_flags & F2FS_NOCOMP_FL));
1863
1864 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1865 set_inode_flag(inode, FI_PROJ_INHERIT);
1866 else
1867 clear_inode_flag(inode, FI_PROJ_INHERIT);
1868
1869 inode->i_ctime = current_time(inode);
1870 f2fs_set_inode_flags(inode);
1871 f2fs_mark_inode_dirty_sync(inode, true);
1872 return 0;
1873}
1874
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[] below, and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To make it settable via FS_IOC_SETFLAGS, also add the
 * FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */
1887static const struct {
1888 u32 iflag;
1889 u32 fsflag;
1890} f2fs_fsflags_map[] = {
1891 { F2FS_COMPR_FL, FS_COMPR_FL },
1892 { F2FS_SYNC_FL, FS_SYNC_FL },
1893 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1894 { F2FS_APPEND_FL, FS_APPEND_FL },
1895 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1896 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1897 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1898 { F2FS_INDEX_FL, FS_INDEX_FL },
1899 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1900 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1901 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1902};
1903
1904#define F2FS_GETTABLE_FS_FL ( \
1905 FS_COMPR_FL | \
1906 FS_SYNC_FL | \
1907 FS_IMMUTABLE_FL | \
1908 FS_APPEND_FL | \
1909 FS_NODUMP_FL | \
1910 FS_NOATIME_FL | \
1911 FS_NOCOMP_FL | \
1912 FS_INDEX_FL | \
1913 FS_DIRSYNC_FL | \
1914 FS_PROJINHERIT_FL | \
1915 FS_ENCRYPT_FL | \
1916 FS_INLINE_DATA_FL | \
1917 FS_NOCOW_FL | \
1918 FS_VERITY_FL | \
1919 FS_CASEFOLD_FL)
1920
1921#define F2FS_SETTABLE_FS_FL ( \
1922 FS_COMPR_FL | \
1923 FS_SYNC_FL | \
1924 FS_IMMUTABLE_FL | \
1925 FS_APPEND_FL | \
1926 FS_NODUMP_FL | \
1927 FS_NOATIME_FL | \
1928 FS_NOCOMP_FL | \
1929 FS_DIRSYNC_FL | \
1930 FS_PROJINHERIT_FL | \
1931 FS_CASEFOLD_FL)
1932
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1934static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1935{
1936 u32 fsflags = 0;
1937 int i;
1938
1939 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1940 if (iflags & f2fs_fsflags_map[i].iflag)
1941 fsflags |= f2fs_fsflags_map[i].fsflag;
1942
1943 return fsflags;
1944}
1945
/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1947static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1948{
1949 u32 iflags = 0;
1950 int i;
1951
1952 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1953 if (fsflags & f2fs_fsflags_map[i].fsflag)
1954 iflags |= f2fs_fsflags_map[i].iflag;
1955
1956 return iflags;
1957}
1958
1959static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1960{
1961 struct inode *inode = file_inode(filp);
1962
1963 return put_user(inode->i_generation, (int __user *)arg);
1964}
1965
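/*
 * F2FS_IOC_START_ATOMIC_WRITE: flush existing dirty pages, register the inode
 * on the atomic-file list and mark it so that subsequent writes are staged
 * in memory until commit or abort.
 */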
1966static int f2fs_ioc_start_atomic_write(struct file *filp)
1967{
1968 struct inode *inode = file_inode(filp);
1969 struct f2fs_inode_info *fi = F2FS_I(inode);
1970 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1971 int ret;
1972
1973 if (!inode_owner_or_capable(&init_user_ns, inode))
1974 return -EACCES;
1975
1976 if (!S_ISREG(inode->i_mode))
1977 return -EINVAL;
1978
1979 if (filp->f_flags & O_DIRECT)
1980 return -EINVAL;
1981
1982 ret = mnt_want_write_file(filp);
1983 if (ret)
1984 return ret;
1985
1986 inode_lock(inode);
1987
1988 f2fs_disable_compressed_file(inode);
1989
1990 if (f2fs_is_atomic_file(inode)) {
1991 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
1992 ret = -EINVAL;
1993 goto out;
1994 }
1995
1996 ret = f2fs_convert_inline_inode(inode);
1997 if (ret)
1998 goto out;
1999
2000 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2001
	/*
	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted correctly
	 * by f2fs_is_atomic_file.
	 */
2006 if (get_dirty_pages(inode))
2007 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2008 inode->i_ino, get_dirty_pages(inode));
2009 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2010 if (ret) {
2011 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2012 goto out;
2013 }
2014
2015 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2016 if (list_empty(&fi->inmem_ilist))
2017 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2018 sbi->atomic_files++;
2019 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2020
	/* the inode is on the inmem list; now mark it as an atomic-write file */
2022 set_inode_flag(inode, FI_ATOMIC_FILE);
2023 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2024 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2025
2026 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2027 F2FS_I(inode)->inmem_task = current;
2028 stat_update_max_atomic_write(inode);
2029out:
2030 inode_unlock(inode);
2031 mnt_drop_write_file(filp);
2032 return ret;
2033}
2034
2035static int f2fs_ioc_commit_atomic_write(struct file *filp)
2036{
2037 struct inode *inode = file_inode(filp);
2038 int ret;
2039
2040 if (!inode_owner_or_capable(&init_user_ns, inode))
2041 return -EACCES;
2042
2043 ret = mnt_want_write_file(filp);
2044 if (ret)
2045 return ret;
2046
2047 f2fs_balance_fs(F2FS_I_SB(inode), true);
2048
2049 inode_lock(inode);
2050
2051 if (f2fs_is_volatile_file(inode)) {
2052 ret = -EINVAL;
2053 goto err_out;
2054 }
2055
2056 if (f2fs_is_atomic_file(inode)) {
2057 ret = f2fs_commit_inmem_pages(inode);
2058 if (ret)
2059 goto err_out;
2060
2061 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2062 if (!ret)
2063 f2fs_drop_inmem_pages(inode);
2064 } else {
2065 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2066 }
2067err_out:
2068 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2069 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2070 ret = -EINVAL;
2071 }
2072 inode_unlock(inode);
2073 mnt_drop_write_file(filp);
2074 return ret;
2075}
2076
2077static int f2fs_ioc_start_volatile_write(struct file *filp)
2078{
2079 struct inode *inode = file_inode(filp);
2080 int ret;
2081
2082 if (!inode_owner_or_capable(&init_user_ns, inode))
2083 return -EACCES;
2084
2085 if (!S_ISREG(inode->i_mode))
2086 return -EINVAL;
2087
2088 ret = mnt_want_write_file(filp);
2089 if (ret)
2090 return ret;
2091
2092 inode_lock(inode);
2093
2094 if (f2fs_is_volatile_file(inode))
2095 goto out;
2096
2097 ret = f2fs_convert_inline_inode(inode);
2098 if (ret)
2099 goto out;
2100
2101 stat_inc_volatile_write(inode);
2102 stat_update_max_volatile_write(inode);
2103
2104 set_inode_flag(inode, FI_VOLATILE_FILE);
2105 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2106out:
2107 inode_unlock(inode);
2108 mnt_drop_write_file(filp);
2109 return ret;
2110}
2111
2112static int f2fs_ioc_release_volatile_write(struct file *filp)
2113{
2114 struct inode *inode = file_inode(filp);
2115 int ret;
2116
2117 if (!inode_owner_or_capable(&init_user_ns, inode))
2118 return -EACCES;
2119
2120 ret = mnt_want_write_file(filp);
2121 if (ret)
2122 return ret;
2123
2124 inode_lock(inode);
2125
2126 if (!f2fs_is_volatile_file(inode))
2127 goto out;
2128
2129 if (!f2fs_is_first_block_written(inode)) {
2130 ret = truncate_partial_data_page(inode, 0, true);
2131 goto out;
2132 }
2133
2134 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2135out:
2136 inode_unlock(inode);
2137 mnt_drop_write_file(filp);
2138 return ret;
2139}
2140
2141static int f2fs_ioc_abort_volatile_write(struct file *filp)
2142{
2143 struct inode *inode = file_inode(filp);
2144 int ret;
2145
2146 if (!inode_owner_or_capable(&init_user_ns, inode))
2147 return -EACCES;
2148
2149 ret = mnt_want_write_file(filp);
2150 if (ret)
2151 return ret;
2152
2153 inode_lock(inode);
2154
2155 if (f2fs_is_atomic_file(inode))
2156 f2fs_drop_inmem_pages(inode);
2157 if (f2fs_is_volatile_file(inode)) {
2158 clear_inode_flag(inode, FI_VOLATILE_FILE);
2159 stat_dec_volatile_write(inode);
2160 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2161 }
2162
2163 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2164
2165 inode_unlock(inode);
2166
2167 mnt_drop_write_file(filp);
2168 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2169 return ret;
2170}
2171
2172static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2173{
2174 struct inode *inode = file_inode(filp);
2175 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2176 struct super_block *sb = sbi->sb;
2177 __u32 in;
2178 int ret = 0;
2179
2180 if (!capable(CAP_SYS_ADMIN))
2181 return -EPERM;
2182
2183 if (get_user(in, (__u32 __user *)arg))
2184 return -EFAULT;
2185
2186 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2187 ret = mnt_want_write_file(filp);
2188 if (ret) {
2189 if (ret == -EROFS) {
2190 ret = 0;
2191 f2fs_stop_checkpoint(sbi, false);
2192 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2193 trace_f2fs_shutdown(sbi, in, ret);
2194 }
2195 return ret;
2196 }
2197 }
2198
2199 switch (in) {
2200 case F2FS_GOING_DOWN_FULLSYNC:
2201 ret = freeze_bdev(sb->s_bdev);
2202 if (ret)
2203 goto out;
2204 f2fs_stop_checkpoint(sbi, false);
2205 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2206 thaw_bdev(sb->s_bdev);
2207 break;
2208 case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
2210 ret = f2fs_sync_fs(sb, 1);
2211 if (ret)
2212 goto out;
2213 f2fs_stop_checkpoint(sbi, false);
2214 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2215 break;
2216 case F2FS_GOING_DOWN_NOSYNC:
2217 f2fs_stop_checkpoint(sbi, false);
2218 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2219 break;
2220 case F2FS_GOING_DOWN_METAFLUSH:
2221 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2222 f2fs_stop_checkpoint(sbi, false);
2223 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2224 break;
2225 case F2FS_GOING_DOWN_NEED_FSCK:
2226 set_sbi_flag(sbi, SBI_NEED_FSCK);
2227 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2228 set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
2230 ret = f2fs_sync_fs(sb, 1);
2231 goto out;
2232 default:
2233 ret = -EINVAL;
2234 goto out;
2235 }
2236
2237 f2fs_stop_gc_thread(sbi);
2238 f2fs_stop_discard_thread(sbi);
2239
2240 f2fs_drop_discard_cmd(sbi);
2241 clear_opt(sbi, DISCARD);
2242
2243 f2fs_update_time(sbi, REQ_TIME);
2244out:
2245 if (in != F2FS_GOING_DOWN_FULLSYNC)
2246 mnt_drop_write_file(filp);
2247
2248 trace_f2fs_shutdown(sbi, in, ret);
2249
2250 return ret;
2251}
2252
2253static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2254{
2255 struct inode *inode = file_inode(filp);
2256 struct super_block *sb = inode->i_sb;
2257 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2258 struct fstrim_range range;
2259 int ret;
2260
2261 if (!capable(CAP_SYS_ADMIN))
2262 return -EPERM;
2263
2264 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2265 return -EOPNOTSUPP;
2266
2267 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2268 sizeof(range)))
2269 return -EFAULT;
2270
2271 ret = mnt_want_write_file(filp);
2272 if (ret)
2273 return ret;
2274
2275 range.minlen = max((unsigned int)range.minlen,
2276 q->limits.discard_granularity);
2277 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2278 mnt_drop_write_file(filp);
2279 if (ret < 0)
2280 return ret;
2281
2282 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2283 sizeof(range)))
2284 return -EFAULT;
2285 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2286 return 0;
2287}
2288
2289static bool uuid_is_nonzero(__u8 u[16])
2290{
2291 int i;
2292
2293 for (i = 0; i < 16; i++)
2294 if (u[i])
2295 return true;
2296 return false;
2297}
2298
2299static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2300{
2301 struct inode *inode = file_inode(filp);
2302
2303 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2304 return -EOPNOTSUPP;
2305
2306 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2307
2308 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2309}
2310
2311static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2312{
2313 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2314 return -EOPNOTSUPP;
2315 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2316}
2317
2318static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2319{
2320 struct inode *inode = file_inode(filp);
2321 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2322 int err;
2323
2324 if (!f2fs_sb_has_encrypt(sbi))
2325 return -EOPNOTSUPP;
2326
2327 err = mnt_want_write_file(filp);
2328 if (err)
2329 return err;
2330
2331 down_write(&sbi->sb_lock);
2332
2333 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* generate a new salt and persist it in the superblock */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2338
2339 err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo the new salt since it could not be committed */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2343 goto out_err;
2344 }
2345got_it:
2346 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2347 16))
2348 err = -EFAULT;
2349out_err:
2350 up_write(&sbi->sb_lock);
2351 mnt_drop_write_file(filp);
2352 return err;
2353}
2354
2355static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2356 unsigned long arg)
2357{
2358 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2359 return -EOPNOTSUPP;
2360
2361 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2362}
2363
2364static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2365{
2366 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2367 return -EOPNOTSUPP;
2368
2369 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2370}
2371
2372static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2373{
2374 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2375 return -EOPNOTSUPP;
2376
2377 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2378}
2379
2380static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2381 unsigned long arg)
2382{
2383 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2384 return -EOPNOTSUPP;
2385
2386 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2387}
2388
2389static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2390 unsigned long arg)
2391{
2392 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2393 return -EOPNOTSUPP;
2394
2395 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2396}
2397
2398static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2399{
2400 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2401 return -EOPNOTSUPP;
2402
2403 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2404}
2405
2406static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2407{
2408 struct inode *inode = file_inode(filp);
2409 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2410 __u32 sync;
2411 int ret;
2412
2413 if (!capable(CAP_SYS_ADMIN))
2414 return -EPERM;
2415
2416 if (get_user(sync, (__u32 __user *)arg))
2417 return -EFAULT;
2418
2419 if (f2fs_readonly(sbi->sb))
2420 return -EROFS;
2421
2422 ret = mnt_want_write_file(filp);
2423 if (ret)
2424 return ret;
2425
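	/*
	 * An asynchronous GC request must not block on gc_lock; report
	 * -EBUSY if the lock is contended.
	 */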
2426 if (!sync) {
2427 if (!down_write_trylock(&sbi->gc_lock)) {
2428 ret = -EBUSY;
2429 goto out;
2430 }
2431 } else {
2432 down_write(&sbi->gc_lock);
2433 }
2434
2435 ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2436out:
2437 mnt_drop_write_file(filp);
2438 return ret;
2439}
2440
2441static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2442{
2443 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2444 u64 end;
2445 int ret;
2446
2447 if (!capable(CAP_SYS_ADMIN))
2448 return -EPERM;
2449 if (f2fs_readonly(sbi->sb))
2450 return -EROFS;
2451
2452 end = range->start + range->len;
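	/* reject wrap-around and ranges outside the main area */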
2453 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2454 end >= MAX_BLKADDR(sbi))
2455 return -EINVAL;
2456
2457 ret = mnt_want_write_file(filp);
2458 if (ret)
2459 return ret;
2460
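	/* garbage collect the requested range one section at a time */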
2461do_more:
2462 if (!range->sync) {
2463 if (!down_write_trylock(&sbi->gc_lock)) {
2464 ret = -EBUSY;
2465 goto out;
2466 }
2467 } else {
2468 down_write(&sbi->gc_lock);
2469 }
2470
2471 ret = f2fs_gc(sbi, range->sync, true, false,
2472 GET_SEGNO(sbi, range->start));
2473 if (ret) {
2474 if (ret == -EBUSY)
2475 ret = -EAGAIN;
2476 goto out;
2477 }
2478 range->start += BLKS_PER_SEC(sbi);
2479 if (range->start <= end)
2480 goto do_more;
2481out:
2482 mnt_drop_write_file(filp);
2483 return ret;
2484}
2485
2486static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2487{
2488 struct f2fs_gc_range range;
2489
2490 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2491 sizeof(range)))
2492 return -EFAULT;
2493 return __f2fs_ioc_gc_range(filp, &range);
2494}
2495
2496static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2497{
2498 struct inode *inode = file_inode(filp);
2499 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2500 int ret;
2501
2502 if (!capable(CAP_SYS_ADMIN))
2503 return -EPERM;
2504
2505 if (f2fs_readonly(sbi->sb))
2506 return -EROFS;
2507
2508 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2509 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2510 return -EINVAL;
2511 }
2512
2513 ret = mnt_want_write_file(filp);
2514 if (ret)
2515 return ret;
2516
2517 ret = f2fs_sync_fs(sbi->sb, 1);
2518
2519 mnt_drop_write_file(filp);
2520 return ret;
2521}
2522
2523static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2524 struct file *filp,
2525 struct f2fs_defragment *range)
2526{
2527 struct inode *inode = file_inode(filp);
2528 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2529 .m_seg_type = NO_CHECK_TYPE,
2530 .m_may_create = false };
2531 struct extent_info ei = {0, 0, 0};
2532 pgoff_t pg_start, pg_end, next_pgofs;
2533 unsigned int blk_per_seg = sbi->blocks_per_seg;
2534 unsigned int total = 0, sec_num;
2535 block_t blk_end = 0;
2536 bool fragmented = false;
2537 int err;
2538
	/* if in-place-update policy is enabled, defragmenting is pointless */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;
2542
2543 pg_start = range->start >> PAGE_SHIFT;
2544 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2545
2546 f2fs_balance_fs(sbi, true);
2547
2548 inode_lock(inode);
2549
	/* write out all dirty pages in the range first */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
					range->start + range->len - 1);
2553 if (err)
2554 goto out;
2555
	/*
	 * Look up the mapping in the extent cache first; if the physical
	 * blocks are already contiguous there is nothing to defragment.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2561 if (ei.fofs + ei.len >= pg_end)
2562 goto out;
2563 }
2564
2565 map.m_lblk = pg_start;
2566 map.m_next_pgofs = &next_pgofs;
2567
	/*
	 * Look up the mapping via the dnode pages and skip defragmenting if
	 * all physical blocks are already contiguous, even when there are
	 * holes in the logical range.
	 */
	while (map.m_lblk < pg_end) {
2574 map.m_len = pg_end - map.m_lblk;
2575 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2576 if (err)
2577 goto out;
2578
2579 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2580 map.m_lblk = next_pgofs;
2581 continue;
2582 }
2583
2584 if (blk_end && blk_end != map.m_pblk)
2585 fragmented = true;
2586
		/* record the total number of blocks we are going to move */
		total += map.m_len;
2589
2590 blk_end = map.m_pblk + map.m_len;
2591
2592 map.m_lblk += map.m_len;
2593 }
2594
2595 if (!fragmented) {
2596 total = 0;
2597 goto out;
2598 }
2599
2600 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2601
	/*
	 * Make sure there are enough free sections for an LFS allocation;
	 * otherwise defragmenting could run in SSR mode and scatter the
	 * blocks even further.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2608 err = -EAGAIN;
2609 goto out;
2610 }
2611
2612 map.m_lblk = pg_start;
2613 map.m_len = pg_end - pg_start;
2614 total = 0;
2615
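	/*
	 * Second pass: dirty every mapped page, at most one segment's worth
	 * at a time, so that writeback reallocates the blocks contiguously.
	 */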
2616 while (map.m_lblk < pg_end) {
2617 pgoff_t idx;
2618 int cnt = 0;
2619
2620do_map:
2621 map.m_len = pg_end - map.m_lblk;
2622 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2623 if (err)
2624 goto clear_out;
2625
2626 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2627 map.m_lblk = next_pgofs;
2628 goto check;
2629 }
2630
2631 set_inode_flag(inode, FI_DO_DEFRAG);
2632
2633 idx = map.m_lblk;
2634 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2635 struct page *page;
2636
2637 page = f2fs_get_lock_data_page(inode, idx, true);
2638 if (IS_ERR(page)) {
2639 err = PTR_ERR(page);
2640 goto clear_out;
2641 }
2642
2643 set_page_dirty(page);
2644 f2fs_put_page(page, 1);
2645
2646 idx++;
2647 cnt++;
2648 total++;
2649 }
2650
2651 map.m_lblk = idx;
2652check:
2653 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2654 goto do_map;
2655
2656 clear_inode_flag(inode, FI_DO_DEFRAG);
2657
2658 err = filemap_fdatawrite(inode->i_mapping);
2659 if (err)
2660 goto out;
2661 }
2662clear_out:
2663 clear_inode_flag(inode, FI_DO_DEFRAG);
2664out:
2665 inode_unlock(inode);
2666 if (!err)
2667 range->len = (u64)total << PAGE_SHIFT;
2668 return err;
2669}
2670
2671static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2672{
2673 struct inode *inode = file_inode(filp);
2674 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2675 struct f2fs_defragment range;
2676 int err;
2677
2678 if (!capable(CAP_SYS_ADMIN))
2679 return -EPERM;
2680
2681 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2682 return -EINVAL;
2683
2684 if (f2fs_readonly(sbi->sb))
2685 return -EROFS;
2686
2687 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2688 sizeof(range)))
2689 return -EFAULT;
2690
	/* the range must be block aligned */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;
2694
2695 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2696 max_file_blocks(inode)))
2697 return -EINVAL;
2698
2699 err = mnt_want_write_file(filp);
2700 if (err)
2701 return err;
2702
2703 err = f2fs_defragment_range(sbi, filp, &range);
2704 mnt_drop_write_file(filp);
2705
2706 f2fs_update_time(sbi, REQ_TIME);
2707 if (err < 0)
2708 return err;
2709
2710 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2711 sizeof(range)))
2712 return -EFAULT;
2713
2714 return 0;
2715}
2716
2717static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2718 struct file *file_out, loff_t pos_out, size_t len)
2719{
2720 struct inode *src = file_inode(file_in);
2721 struct inode *dst = file_inode(file_out);
2722 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2723 size_t olen = len, dst_max_i_size = 0;
2724 size_t dst_osize;
2725 int ret;
2726
2727 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2728 src->i_sb != dst->i_sb)
2729 return -EXDEV;
2730
2731 if (unlikely(f2fs_readonly(src->i_sb)))
2732 return -EROFS;
2733
2734 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2735 return -EINVAL;
2736
2737 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2738 return -EOPNOTSUPP;
2739
2740 if (pos_out < 0 || pos_in < 0)
2741 return -EINVAL;
2742
2743 if (src == dst) {
2744 if (pos_in == pos_out)
2745 return 0;
2746 if (pos_out > pos_in && pos_out < pos_in + len)
2747 return -EINVAL;
2748 }
2749
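	/*
	 * Lock the source inode first and only trylock the destination so
	 * that two crossed moves cannot deadlock against each other.
	 */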
2750 inode_lock(src);
2751 if (src != dst) {
2752 ret = -EBUSY;
2753 if (!inode_trylock(dst))
2754 goto out;
2755 }
2756
2757 ret = -EINVAL;
2758 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2759 goto out_unlock;
2760 if (len == 0)
2761 olen = len = src->i_size - pos_in;
2762 if (pos_in + len == src->i_size)
2763 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2764 if (len == 0) {
2765 ret = 0;
2766 goto out_unlock;
2767 }
2768
2769 dst_osize = dst->i_size;
2770 if (pos_out + olen > dst->i_size)
2771 dst_max_i_size = pos_out + olen;
2772
	/* the resulting range must be block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;
2778
2779 ret = f2fs_convert_inline_inode(src);
2780 if (ret)
2781 goto out_unlock;
2782
2783 ret = f2fs_convert_inline_inode(dst);
2784 if (ret)
2785 goto out_unlock;
2786
	/* write out all dirty pages from the source offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
2790 if (ret)
2791 goto out_unlock;
2792
2793 ret = filemap_write_and_wait_range(dst->i_mapping,
2794 pos_out, pos_out + len);
2795 if (ret)
2796 goto out_unlock;
2797
2798 f2fs_balance_fs(sbi, true);
2799
2800 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2801 if (src != dst) {
2802 ret = -EBUSY;
2803 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2804 goto out_src;
2805 }
2806
2807 f2fs_lock_op(sbi);
2808 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2809 pos_out >> F2FS_BLKSIZE_BITS,
2810 len >> F2FS_BLKSIZE_BITS, false);
2811
2812 if (!ret) {
2813 if (dst_max_i_size)
2814 f2fs_i_size_write(dst, dst_max_i_size);
2815 else if (dst_osize != dst->i_size)
2816 f2fs_i_size_write(dst, dst_osize);
2817 }
2818 f2fs_unlock_op(sbi);
2819
2820 if (src != dst)
2821 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2822out_src:
2823 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2824out_unlock:
2825 if (src != dst)
2826 inode_unlock(dst);
2827out:
2828 inode_unlock(src);
2829 return ret;
2830}
2831
2832static int __f2fs_ioc_move_range(struct file *filp,
2833 struct f2fs_move_range *range)
2834{
2835 struct fd dst;
2836 int err;
2837
2838 if (!(filp->f_mode & FMODE_READ) ||
2839 !(filp->f_mode & FMODE_WRITE))
2840 return -EBADF;
2841
2842 dst = fdget(range->dst_fd);
2843 if (!dst.file)
2844 return -EBADF;
2845
2846 if (!(dst.file->f_mode & FMODE_WRITE)) {
2847 err = -EBADF;
2848 goto err_out;
2849 }
2850
2851 err = mnt_want_write_file(filp);
2852 if (err)
2853 goto err_out;
2854
2855 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2856 range->pos_out, range->len);
2857
2858 mnt_drop_write_file(filp);
2859err_out:
2860 fdput(dst);
2861 return err;
2862}
2863
2864static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2865{
2866 struct f2fs_move_range range;
2867
2868 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2869 sizeof(range)))
2870 return -EFAULT;
2871 return __f2fs_ioc_move_range(filp, &range);
2872}
2873
2874static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2875{
2876 struct inode *inode = file_inode(filp);
2877 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2878 struct sit_info *sm = SIT_I(sbi);
2879 unsigned int start_segno = 0, end_segno = 0;
2880 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2881 struct f2fs_flush_device range;
2882 int ret;
2883
2884 if (!capable(CAP_SYS_ADMIN))
2885 return -EPERM;
2886
2887 if (f2fs_readonly(sbi->sb))
2888 return -EROFS;
2889
2890 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2891 return -EINVAL;
2892
2893 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2894 sizeof(range)))
2895 return -EFAULT;
2896
2897 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2898 __is_large_section(sbi)) {
2899 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2900 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2901 return -EINVAL;
2902 }
2903
2904 ret = mnt_want_write_file(filp);
2905 if (ret)
2906 return ret;
2907
2908 if (range.dev_num != 0)
2909 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2910 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2911
2912 start_segno = sm->last_victim[FLUSH_DEVICE];
2913 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2914 start_segno = dev_start_segno;
2915 end_segno = min(start_segno + range.segments, dev_end_segno);
2916
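	/*
	 * Force GC on the device's sections one by one so that their valid
	 * blocks are migrated off the device being flushed.
	 */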
2917 while (start_segno < end_segno) {
2918 if (!down_write_trylock(&sbi->gc_lock)) {
2919 ret = -EBUSY;
2920 goto out;
2921 }
2922 sm->last_victim[GC_CB] = end_segno + 1;
2923 sm->last_victim[GC_GREEDY] = end_segno + 1;
2924 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2925 ret = f2fs_gc(sbi, true, true, true, start_segno);
2926 if (ret == -EAGAIN)
2927 ret = 0;
2928 else if (ret < 0)
2929 break;
2930 start_segno++;
2931 }
2932out:
2933 mnt_drop_write_file(filp);
2934 return ret;
2935}
2936
2937static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2938{
2939 struct inode *inode = file_inode(filp);
2940 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2941
	/* atomic write is a runtime feature, so always advertise it */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2944
2945 return put_user(sb_feature, (u32 __user *)arg);
2946}
2947
2948#ifdef CONFIG_QUOTA
2949int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2950{
2951 struct dquot *transfer_to[MAXQUOTAS] = {};
2952 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2953 struct super_block *sb = sbi->sb;
2954 int err = 0;
2955
2956 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2957 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2958 err = __dquot_transfer(inode, transfer_to);
2959 if (err)
2960 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2961 dqput(transfer_to[PRJQUOTA]);
2962 }
2963 return err;
2964}
2965
2966static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2967{
2968 struct f2fs_inode_info *fi = F2FS_I(inode);
2969 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2970 struct page *ipage;
2971 kprojid_t kprojid;
2972 int err;
2973
2974 if (!f2fs_sb_has_project_quota(sbi)) {
2975 if (projid != F2FS_DEF_PROJID)
2976 return -EOPNOTSUPP;
2977 else
2978 return 0;
2979 }
2980
2981 if (!f2fs_has_extra_attr(inode))
2982 return -EOPNOTSUPP;
2983
2984 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2985
2986 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
2987 return 0;
2988
2989 err = -EPERM;
2990
2991 if (IS_NOQUOTA(inode))
2992 return err;
2993
2994 ipage = f2fs_get_node_page(sbi, inode->i_ino);
2995 if (IS_ERR(ipage))
2996 return PTR_ERR(ipage);
2997
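	/* the on-disk inode must have room for i_projid in its extra area */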
2998 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
2999 i_projid)) {
3000 err = -EOVERFLOW;
3001 f2fs_put_page(ipage, 1);
3002 return err;
3003 }
3004 f2fs_put_page(ipage, 1);
3005
3006 err = dquot_initialize(inode);
3007 if (err)
3008 return err;
3009
3010 f2fs_lock_op(sbi);
3011 err = f2fs_transfer_project_quota(inode, kprojid);
3012 if (err)
3013 goto out_unlock;
3014
3015 F2FS_I(inode)->i_projid = kprojid;
3016 inode->i_ctime = current_time(inode);
3017 f2fs_mark_inode_dirty_sync(inode, true);
3018out_unlock:
3019 f2fs_unlock_op(sbi);
3020 return err;
3021}
3022#else
3023int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3024{
3025 return 0;
3026}
3027
3028static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3029{
3030 if (projid != F2FS_DEF_PROJID)
3031 return -EOPNOTSUPP;
3032 return 0;
3033}
3034#endif
3035
3036int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3037{
3038 struct inode *inode = d_inode(dentry);
3039 struct f2fs_inode_info *fi = F2FS_I(inode);
3040 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3041
3042 if (IS_ENCRYPTED(inode))
3043 fsflags |= FS_ENCRYPT_FL;
3044 if (IS_VERITY(inode))
3045 fsflags |= FS_VERITY_FL;
3046 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3047 fsflags |= FS_INLINE_DATA_FL;
3048 if (is_inode_flag_set(inode, FI_PIN_FILE))
3049 fsflags |= FS_NOCOW_FL;
3050
3051 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3052
3053 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3054 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3055
3056 return 0;
3057}
3058
3059int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3060 struct dentry *dentry, struct fileattr *fa)
3061{
3062 struct inode *inode = d_inode(dentry);
3063 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3064 u32 iflags;
3065 int err;
3066
3067 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3068 return -EIO;
3069 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3070 return -ENOSPC;
3071 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3072 return -EOPNOTSUPP;
3073 fsflags &= F2FS_SETTABLE_FS_FL;
3074 if (!fa->flags_valid)
3075 mask &= FS_COMMON_FL;
3076
3077 iflags = f2fs_fsflags_to_iflags(fsflags);
3078 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3079 return -EOPNOTSUPP;
3080
3081 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3082 if (!err)
3083 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3084
3085 return err;
3086}
3087
3088int f2fs_pin_file_control(struct inode *inode, bool inc)
3089{
3090 struct f2fs_inode_info *fi = F2FS_I(inode);
3091 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3092
	/* i_gc_failures is used as a risk signal for pinned regular files */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3097
3098 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3099 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3100 __func__, inode->i_ino,
3101 fi->i_gc_failures[GC_FAILURE_PIN]);
3102 clear_inode_flag(inode, FI_PIN_FILE);
3103 return -EAGAIN;
3104 }
3105 return 0;
3106}
3107
3108static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3109{
3110 struct inode *inode = file_inode(filp);
3111 __u32 pin;
3112 int ret = 0;
3113
3114 if (get_user(pin, (__u32 __user *)arg))
3115 return -EFAULT;
3116
3117 if (!S_ISREG(inode->i_mode))
3118 return -EINVAL;
3119
3120 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3121 return -EROFS;
3122
3123 ret = mnt_want_write_file(filp);
3124 if (ret)
3125 return ret;
3126
3127 inode_lock(inode);
3128
3129 if (f2fs_should_update_outplace(inode, NULL)) {
3130 ret = -EINVAL;
3131 goto out;
3132 }
3133
3134 if (!pin) {
3135 clear_inode_flag(inode, FI_PIN_FILE);
3136 f2fs_i_gc_failures_write(inode, 0);
3137 goto done;
3138 }
3139
3140 if (f2fs_pin_file_control(inode, false)) {
3141 ret = -EAGAIN;
3142 goto out;
3143 }
3144
3145 ret = f2fs_convert_inline_inode(inode);
3146 if (ret)
3147 goto out;
3148
3149 if (!f2fs_disable_compressed_file(inode)) {
3150 ret = -EOPNOTSUPP;
3151 goto out;
3152 }
3153
3154 set_inode_flag(inode, FI_PIN_FILE);
3155 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3156done:
3157 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3158out:
3159 inode_unlock(inode);
3160 mnt_drop_write_file(filp);
3161 return ret;
3162}
3163
3164static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3165{
3166 struct inode *inode = file_inode(filp);
3167 __u32 pin = 0;
3168
3169 if (is_inode_flag_set(inode, FI_PIN_FILE))
3170 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3171 return put_user(pin, (u32 __user *)arg);
3172}
3173
3174int f2fs_precache_extents(struct inode *inode)
3175{
3176 struct f2fs_inode_info *fi = F2FS_I(inode);
3177 struct f2fs_map_blocks map;
3178 pgoff_t m_next_extent;
3179 loff_t end;
3180 int err;
3181
3182 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3183 return -EOPNOTSUPP;
3184
3185 map.m_lblk = 0;
3186 map.m_next_pgofs = NULL;
3187 map.m_next_extent = &m_next_extent;
3188 map.m_seg_type = NO_CHECK_TYPE;
3189 map.m_may_create = false;
3190 end = max_file_blocks(inode);
3191
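	/* map the whole file in PRECACHE mode to populate the extent cache */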
3192 while (map.m_lblk < end) {
3193 map.m_len = end - map.m_lblk;
3194
3195 down_write(&fi->i_gc_rwsem[WRITE]);
3196 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3197 up_write(&fi->i_gc_rwsem[WRITE]);
3198 if (err)
3199 return err;
3200
3201 map.m_lblk = m_next_extent;
3202 }
3203
3204 return 0;
3205}
3206
3207static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3208{
3209 return f2fs_precache_extents(file_inode(filp));
3210}
3211
3212static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3213{
3214 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3215 __u64 block_count;
3216
3217 if (!capable(CAP_SYS_ADMIN))
3218 return -EPERM;
3219
3220 if (f2fs_readonly(sbi->sb))
3221 return -EROFS;
3222
3223 if (copy_from_user(&block_count, (void __user *)arg,
3224 sizeof(block_count)))
3225 return -EFAULT;
3226
3227 return f2fs_resize_fs(sbi, block_count);
3228}
3229
3230static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3231{
3232 struct inode *inode = file_inode(filp);
3233
3234 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3235
3236 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3237 f2fs_warn(F2FS_I_SB(inode),
3238 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3239 inode->i_ino);
3240 return -EOPNOTSUPP;
3241 }
3242
3243 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3244}
3245
3246static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3247{
3248 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3249 return -EOPNOTSUPP;
3250
3251 return fsverity_ioctl_measure(filp, (void __user *)arg);
3252}
3253
3254static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3255{
3256 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3257 return -EOPNOTSUPP;
3258
3259 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3260}
3261
3262static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3263{
3264 struct inode *inode = file_inode(filp);
3265 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3266 char *vbuf;
3267 int count;
3268 int err = 0;
3269
3270 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3271 if (!vbuf)
3272 return -ENOMEM;
3273
3274 down_read(&sbi->sb_lock);
3275 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3276 ARRAY_SIZE(sbi->raw_super->volume_name),
3277 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3278 up_read(&sbi->sb_lock);
3279
3280 if (copy_to_user((char __user *)arg, vbuf,
3281 min(FSLABEL_MAX, count)))
3282 err = -EFAULT;
3283
3284 kfree(vbuf);
3285 return err;
3286}
3287
3288static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3289{
3290 struct inode *inode = file_inode(filp);
3291 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3292 char *vbuf;
3293 int err = 0;
3294
3295 if (!capable(CAP_SYS_ADMIN))
3296 return -EPERM;
3297
3298 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3299 if (IS_ERR(vbuf))
3300 return PTR_ERR(vbuf);
3301
3302 err = mnt_want_write_file(filp);
3303 if (err)
3304 goto out;
3305
3306 down_write(&sbi->sb_lock);
3307
3308 memset(sbi->raw_super->volume_name, 0,
3309 sizeof(sbi->raw_super->volume_name));
3310 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3311 sbi->raw_super->volume_name,
3312 ARRAY_SIZE(sbi->raw_super->volume_name));
3313
3314 err = f2fs_commit_super(sbi, false);
3315
3316 up_write(&sbi->sb_lock);
3317
3318 mnt_drop_write_file(filp);
3319out:
3320 kfree(vbuf);
3321 return err;
3322}
3323
3324static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3325{
3326 struct inode *inode = file_inode(filp);
3327 __u64 blocks;
3328
3329 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3330 return -EOPNOTSUPP;
3331
3332 if (!f2fs_compressed_file(inode))
3333 return -EINVAL;
3334
3335 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3336 return put_user(blocks, (u64 __user *)arg);
3337}
3338
3339static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3340{
3341 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3342 unsigned int released_blocks = 0;
3343 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3344 block_t blkaddr;
3345 int i;
3346
3347 for (i = 0; i < count; i++) {
3348 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3349 dn->ofs_in_node + i);
3350
3351 if (!__is_valid_data_blkaddr(blkaddr))
3352 continue;
3353 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3354 DATA_GENERIC_ENHANCE)))
3355 return -EFSCORRUPTED;
3356 }
3357
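	/*
	 * Walk the range cluster by cluster: slots that were merely reserved
	 * (NEW_ADDR) are released back to free space, while blocks holding
	 * compressed data are kept.
	 */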
3358 while (count) {
3359 int compr_blocks = 0;
3360
3361 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3362 blkaddr = f2fs_data_blkaddr(dn);
3363
3364 if (i == 0) {
3365 if (blkaddr == COMPRESS_ADDR)
3366 continue;
3367 dn->ofs_in_node += cluster_size;
3368 goto next;
3369 }
3370
3371 if (__is_valid_data_blkaddr(blkaddr))
3372 compr_blocks++;
3373
3374 if (blkaddr != NEW_ADDR)
3375 continue;
3376
3377 dn->data_blkaddr = NULL_ADDR;
3378 f2fs_set_data_blkaddr(dn);
3379 }
3380
3381 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3382 dec_valid_block_count(sbi, dn->inode,
3383 cluster_size - compr_blocks);
3384
3385 released_blocks += cluster_size - compr_blocks;
3386next:
3387 count -= cluster_size;
3388 }
3389
3390 return released_blocks;
3391}
3392
3393static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3394{
3395 struct inode *inode = file_inode(filp);
3396 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3397 pgoff_t page_idx = 0, last_idx;
3398 unsigned int released_blocks = 0;
3399 int ret;
3400 int writecount;
3401
3402 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3403 return -EOPNOTSUPP;
3404
3405 if (!f2fs_compressed_file(inode))
3406 return -EINVAL;
3407
3408 if (f2fs_readonly(sbi->sb))
3409 return -EROFS;
3410
3411 ret = mnt_want_write_file(filp);
3412 if (ret)
3413 return ret;
3414
3415 f2fs_balance_fs(F2FS_I_SB(inode), true);
3416
3417 inode_lock(inode);
3418
3419 writecount = atomic_read(&inode->i_writecount);
3420 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3421 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3422 ret = -EBUSY;
3423 goto out;
3424 }
3425
3426 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3427 ret = -EINVAL;
3428 goto out;
3429 }
3430
3431 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3432 if (ret)
3433 goto out;
3434
3435 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3436 inode->i_ctime = current_time(inode);
3437 f2fs_mark_inode_dirty_sync(inode, true);
3438
3439 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3440 goto out;
3441
3442 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3443 down_write(&F2FS_I(inode)->i_mmap_sem);
3444
3445 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3446
3447 while (page_idx < last_idx) {
3448 struct dnode_of_data dn;
3449 pgoff_t end_offset, count;
3450
3451 set_new_dnode(&dn, inode, NULL, NULL, 0);
3452 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3453 if (ret) {
3454 if (ret == -ENOENT) {
3455 page_idx = f2fs_get_next_page_offset(&dn,
3456 page_idx);
3457 ret = 0;
3458 continue;
3459 }
3460 break;
3461 }
3462
3463 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3464 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3465 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3466
3467 ret = release_compress_blocks(&dn, count);
3468
3469 f2fs_put_dnode(&dn);
3470
3471 if (ret < 0)
3472 break;
3473
3474 page_idx += count;
3475 released_blocks += ret;
3476 }
3477
3478 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3479 up_write(&F2FS_I(inode)->i_mmap_sem);
3480out:
3481 inode_unlock(inode);
3482
3483 mnt_drop_write_file(filp);
3484
3485 if (ret >= 0) {
3486 ret = put_user(released_blocks, (u64 __user *)arg);
3487 } else if (released_blocks &&
3488 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3489 set_sbi_flag(sbi, SBI_NEED_FSCK);
3490 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3491 "iblocks=%llu, released=%u, compr_blocks=%u, "
3492 "run fsck to fix.",
3493 __func__, inode->i_ino, inode->i_blocks,
3494 released_blocks,
3495 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3496 }
3497
3498 return ret;
3499}
3500
3501static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3502{
3503 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3504 unsigned int reserved_blocks = 0;
3505 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3506 block_t blkaddr;
3507 int i;
3508
3509 for (i = 0; i < count; i++) {
3510 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3511 dn->ofs_in_node + i);
3512
3513 if (!__is_valid_data_blkaddr(blkaddr))
3514 continue;
3515 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3516 DATA_GENERIC_ENHANCE)))
3517 return -EFSCORRUPTED;
3518 }
3519
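	/*
	 * Walk the range cluster by cluster, re-reserving a NEW_ADDR slot
	 * for every block that compression had saved.
	 */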
3520 while (count) {
3521 int compr_blocks = 0;
3522 blkcnt_t reserved;
3523 int ret;
3524
3525 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3526 blkaddr = f2fs_data_blkaddr(dn);
3527
3528 if (i == 0) {
3529 if (blkaddr == COMPRESS_ADDR)
3530 continue;
3531 dn->ofs_in_node += cluster_size;
3532 goto next;
3533 }
3534
3535 if (__is_valid_data_blkaddr(blkaddr)) {
3536 compr_blocks++;
3537 continue;
3538 }
3539
3540 dn->data_blkaddr = NEW_ADDR;
3541 f2fs_set_data_blkaddr(dn);
3542 }
3543
3544 reserved = cluster_size - compr_blocks;
3545 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3546 if (ret)
3547 return ret;
3548
3549 if (reserved != cluster_size - compr_blocks)
3550 return -ENOSPC;
3551
3552 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3553
3554 reserved_blocks += reserved;
3555next:
3556 count -= cluster_size;
3557 }
3558
3559 return reserved_blocks;
3560}
3561
3562static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3563{
3564 struct inode *inode = file_inode(filp);
3565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3566 pgoff_t page_idx = 0, last_idx;
3567 unsigned int reserved_blocks = 0;
3568 int ret;
3569
3570 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3571 return -EOPNOTSUPP;
3572
3573 if (!f2fs_compressed_file(inode))
3574 return -EINVAL;
3575
3576 if (f2fs_readonly(sbi->sb))
3577 return -EROFS;
3578
3579 ret = mnt_want_write_file(filp);
3580 if (ret)
3581 return ret;
3582
3583 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3584 goto out;
3585
3586 f2fs_balance_fs(F2FS_I_SB(inode), true);
3587
3588 inode_lock(inode);
3589
3590 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3591 ret = -EINVAL;
3592 goto unlock_inode;
3593 }
3594
3595 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3596 down_write(&F2FS_I(inode)->i_mmap_sem);
3597
3598 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3599
3600 while (page_idx < last_idx) {
3601 struct dnode_of_data dn;
3602 pgoff_t end_offset, count;
3603
3604 set_new_dnode(&dn, inode, NULL, NULL, 0);
3605 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3606 if (ret) {
3607 if (ret == -ENOENT) {
3608 page_idx = f2fs_get_next_page_offset(&dn,
3609 page_idx);
3610 ret = 0;
3611 continue;
3612 }
3613 break;
3614 }
3615
3616 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3617 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3618 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3619
3620 ret = reserve_compress_blocks(&dn, count);
3621
3622 f2fs_put_dnode(&dn);
3623
3624 if (ret < 0)
3625 break;
3626
3627 page_idx += count;
3628 reserved_blocks += ret;
3629 }
3630
3631 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3632 up_write(&F2FS_I(inode)->i_mmap_sem);
3633
3634 if (ret >= 0) {
3635 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3636 inode->i_ctime = current_time(inode);
3637 f2fs_mark_inode_dirty_sync(inode, true);
3638 }
3639unlock_inode:
3640 inode_unlock(inode);
3641out:
3642 mnt_drop_write_file(filp);
3643
3644 if (ret >= 0) {
3645 ret = put_user(reserved_blocks, (u64 __user *)arg);
3646 } else if (reserved_blocks &&
3647 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3648 set_sbi_flag(sbi, SBI_NEED_FSCK);
3649 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3650 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3651 "run fsck to fix.",
3652 __func__, inode->i_ino, inode->i_blocks,
3653 reserved_blocks,
3654 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3655 }
3656
3657 return ret;
3658}
3659
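/*
 * Discard and/or zero out one physically contiguous extent. Zeroing an
 * encrypted inode goes through fscrypt so that ciphertext is written out.
 */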
3660static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3661 pgoff_t off, block_t block, block_t len, u32 flags)
3662{
3663 struct request_queue *q = bdev_get_queue(bdev);
3664 sector_t sector = SECTOR_FROM_BLOCK(block);
3665 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3666 int ret = 0;
3667
3668 if (!q)
3669 return -ENXIO;
3670
3671 if (flags & F2FS_TRIM_FILE_DISCARD)
3672 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3673 blk_queue_secure_erase(q) ?
3674 BLKDEV_DISCARD_SECURE : 0);
3675
3676 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3677 if (IS_ENCRYPTED(inode))
3678 ret = fscrypt_zeroout_range(inode, off, block, len);
3679 else
3680 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3681 GFP_NOFS, 0);
3682 }
3683
3684 return ret;
3685}
3686
3687static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3688{
3689 struct inode *inode = file_inode(filp);
3690 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3691 struct address_space *mapping = inode->i_mapping;
3692 struct block_device *prev_bdev = NULL;
3693 struct f2fs_sectrim_range range;
3694 pgoff_t index, pg_end, prev_index = 0;
3695 block_t prev_block = 0, len = 0;
3696 loff_t end_addr;
3697 bool to_end = false;
3698 int ret = 0;
3699
3700 if (!(filp->f_mode & FMODE_WRITE))
3701 return -EBADF;
3702
3703 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3704 sizeof(range)))
3705 return -EFAULT;
3706
3707 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3708 !S_ISREG(inode->i_mode))
3709 return -EINVAL;
3710
3711 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3712 !f2fs_hw_support_discard(sbi)) ||
3713 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3714 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3715 return -EOPNOTSUPP;
3716
3717 file_start_write(filp);
3718 inode_lock(inode);
3719
3720 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3721 range.start >= inode->i_size) {
3722 ret = -EINVAL;
3723 goto err;
3724 }
3725
3726 if (range.len == 0)
3727 goto err;
3728
3729 if (inode->i_size - range.start > range.len) {
3730 end_addr = range.start + range.len;
3731 } else {
3732 end_addr = range.len == (u64)-1 ?
3733 sbi->sb->s_maxbytes : inode->i_size;
3734 to_end = true;
3735 }
3736
3737 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3738 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3739 ret = -EINVAL;
3740 goto err;
3741 }
3742
3743 index = F2FS_BYTES_TO_BLK(range.start);
3744 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3745
3746 ret = f2fs_convert_inline_inode(inode);
3747 if (ret)
3748 goto err;
3749
3750 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3751 down_write(&F2FS_I(inode)->i_mmap_sem);
3752
3753 ret = filemap_write_and_wait_range(mapping, range.start,
3754 to_end ? LLONG_MAX : end_addr - 1);
3755 if (ret)
3756 goto out;
3757
3758 truncate_inode_pages_range(mapping, range.start,
3759 to_end ? -1 : end_addr - 1);
3760
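	/*
	 * Scan the blocks in the range and batch physically contiguous runs
	 * on the same device into a single secure-erase request.
	 */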
3761 while (index < pg_end) {
3762 struct dnode_of_data dn;
3763 pgoff_t end_offset, count;
3764 int i;
3765
3766 set_new_dnode(&dn, inode, NULL, NULL, 0);
3767 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3768 if (ret) {
3769 if (ret == -ENOENT) {
3770 index = f2fs_get_next_page_offset(&dn, index);
3771 continue;
3772 }
3773 goto out;
3774 }
3775
3776 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3777 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3778 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3779 struct block_device *cur_bdev;
3780 block_t blkaddr = f2fs_data_blkaddr(&dn);
3781
3782 if (!__is_valid_data_blkaddr(blkaddr))
3783 continue;
3784
3785 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3786 DATA_GENERIC_ENHANCE)) {
3787 ret = -EFSCORRUPTED;
3788 f2fs_put_dnode(&dn);
3789 goto out;
3790 }
3791
3792 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3793 if (f2fs_is_multi_device(sbi)) {
3794 int di = f2fs_target_device_index(sbi, blkaddr);
3795
3796 blkaddr -= FDEV(di).start_blk;
3797 }
3798
3799 if (len) {
3800 if (prev_bdev == cur_bdev &&
3801 index == prev_index + len &&
3802 blkaddr == prev_block + len) {
3803 len++;
3804 } else {
3805 ret = f2fs_secure_erase(prev_bdev,
3806 inode, prev_index, prev_block,
3807 len, range.flags);
3808 if (ret) {
3809 f2fs_put_dnode(&dn);
3810 goto out;
3811 }
3812
3813 len = 0;
3814 }
3815 }
3816
3817 if (!len) {
3818 prev_bdev = cur_bdev;
3819 prev_index = index;
3820 prev_block = blkaddr;
3821 len = 1;
3822 }
3823 }
3824
3825 f2fs_put_dnode(&dn);
3826
3827 if (fatal_signal_pending(current)) {
3828 ret = -EINTR;
3829 goto out;
3830 }
3831 cond_resched();
3832 }
3833
3834 if (len)
3835 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3836 prev_block, len, range.flags);
3837out:
3838 up_write(&F2FS_I(inode)->i_mmap_sem);
3839 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3840err:
3841 inode_unlock(inode);
3842 file_end_write(filp);
3843
3844 return ret;
3845}
3846
3847static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3848{
3849 struct inode *inode = file_inode(filp);
3850 struct f2fs_comp_option option;
3851
3852 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3853 return -EOPNOTSUPP;
3854
3855 inode_lock_shared(inode);
3856
3857 if (!f2fs_compressed_file(inode)) {
3858 inode_unlock_shared(inode);
3859 return -ENODATA;
3860 }
3861
3862 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3863 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3864
3865 inode_unlock_shared(inode);
3866
3867 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3868 sizeof(option)))
3869 return -EFAULT;
3870
3871 return 0;
3872}
3873
3874static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3875{
3876 struct inode *inode = file_inode(filp);
3877 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3878 struct f2fs_comp_option option;
3879 int ret = 0;
3880
3881 if (!f2fs_sb_has_compression(sbi))
3882 return -EOPNOTSUPP;
3883
3884 if (!(filp->f_mode & FMODE_WRITE))
3885 return -EBADF;
3886
3887 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3888 sizeof(option)))
3889 return -EFAULT;
3890
3891 if (!f2fs_compressed_file(inode) ||
3892 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3893 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3894 option.algorithm >= COMPRESS_MAX)
3895 return -EINVAL;
3896
3897 file_start_write(filp);
3898 inode_lock(inode);
3899
3900 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3901 ret = -EBUSY;
3902 goto out;
3903 }
3904
3905 if (inode->i_size != 0) {
3906 ret = -EFBIG;
3907 goto out;
3908 }
3909
3910 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3911 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3912 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3913 f2fs_mark_inode_dirty_sync(inode, true);
3914
3915 if (!f2fs_is_compress_backend_ready(inode))
3916 f2fs_warn(sbi, "compression algorithm is successfully set, "
3917 "but current kernel doesn't support this algorithm.");
3918out:
3919 inode_unlock(inode);
3920 file_end_write(filp);
3921
3922 return ret;
3923}
3924
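/*
 * Read len pages starting at page_idx into the page cache and dirty them
 * again so that a later writeback pass rewrites the data; this is how the
 * compress/decompress ioctls force clusters to be re-encoded.
 */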
3925static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3926{
3927 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3928 struct address_space *mapping = inode->i_mapping;
3929 struct page *page;
3930 pgoff_t redirty_idx = page_idx;
3931 int i, page_len = 0, ret = 0;
3932
3933 page_cache_ra_unbounded(&ractl, len, 0);
3934
3935 for (i = 0; i < len; i++, page_idx++) {
3936 page = read_cache_page(mapping, page_idx, NULL, NULL);
3937 if (IS_ERR(page)) {
3938 ret = PTR_ERR(page);
3939 break;
3940 }
3941 page_len++;
3942 }
3943
3944 for (i = 0; i < page_len; i++, redirty_idx++) {
3945 page = find_lock_page(mapping, redirty_idx);
3946 if (!page) {
3947 ret = -ENOMEM;
3948 break;
3949 }
3950 set_page_dirty(page);
3951 f2fs_put_page(page, 1);
3952 f2fs_put_page(page, 0);
3953 }
3954
3955 return ret;
3956}
3957
3958static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3959{
3960 struct inode *inode = file_inode(filp);
3961 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3962 struct f2fs_inode_info *fi = F2FS_I(inode);
3963 pgoff_t page_idx = 0, last_idx;
3964 unsigned int blk_per_seg = sbi->blocks_per_seg;
3965 int cluster_size = F2FS_I(inode)->i_cluster_size;
3966 int count, ret;
3967
3968 if (!f2fs_sb_has_compression(sbi) ||
3969 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3970 return -EOPNOTSUPP;
3971
3972 if (!(filp->f_mode & FMODE_WRITE))
3973 return -EBADF;
3974
3975 if (!f2fs_compressed_file(inode))
3976 return -EINVAL;
3977
3978 f2fs_balance_fs(F2FS_I_SB(inode), true);
3979
3980 file_start_write(filp);
3981 inode_lock(inode);
3982
3983 if (!f2fs_is_compress_backend_ready(inode)) {
3984 ret = -EOPNOTSUPP;
3985 goto out;
3986 }
3987
3988 if (f2fs_is_mmap_file(inode)) {
3989 ret = -EBUSY;
3990 goto out;
3991 }
3992
3993 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3994 if (ret)
3995 goto out;
3996
3997 if (!atomic_read(&fi->i_compr_blocks))
3998 goto out;
3999
4000 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4001
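	/* redirty the file cluster by cluster, flushing a segment at a time */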
4002 count = last_idx - page_idx;
4003 while (count) {
4004 int len = min(cluster_size, count);
4005
4006 ret = redirty_blocks(inode, page_idx, len);
4007 if (ret < 0)
4008 break;
4009
4010 if (get_dirty_pages(inode) >= blk_per_seg)
4011 filemap_fdatawrite(inode->i_mapping);
4012
4013 count -= len;
4014 page_idx += len;
4015 }
4016
4017 if (!ret)
4018 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4019 LLONG_MAX);
4020
4021 if (ret)
4022 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4023 __func__, ret);
4024out:
4025 inode_unlock(inode);
4026 file_end_write(filp);
4027
4028 return ret;
4029}
4030
4031static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4032{
4033 struct inode *inode = file_inode(filp);
4034 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4035 pgoff_t page_idx = 0, last_idx;
4036 unsigned int blk_per_seg = sbi->blocks_per_seg;
4037 int cluster_size = F2FS_I(inode)->i_cluster_size;
4038 int count, ret;
4039
4040 if (!f2fs_sb_has_compression(sbi) ||
4041 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4042 return -EOPNOTSUPP;
4043
4044 if (!(filp->f_mode & FMODE_WRITE))
4045 return -EBADF;
4046
4047 if (!f2fs_compressed_file(inode))
4048 return -EINVAL;
4049
4050 f2fs_balance_fs(F2FS_I_SB(inode), true);
4051
4052 file_start_write(filp);
4053 inode_lock(inode);
4054
4055 if (!f2fs_is_compress_backend_ready(inode)) {
4056 ret = -EOPNOTSUPP;
4057 goto out;
4058 }
4059
4060 if (f2fs_is_mmap_file(inode)) {
4061 ret = -EBUSY;
4062 goto out;
4063 }
4064
4065 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4066 if (ret)
4067 goto out;
4068
4069 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4070
4071 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4072
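	/* with FI_ENABLE_COMPRESS set, rewriting the data stores it compressed */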
4073 count = last_idx - page_idx;
4074 while (count) {
4075 int len = min(cluster_size, count);
4076
4077 ret = redirty_blocks(inode, page_idx, len);
4078 if (ret < 0)
4079 break;
4080
4081 if (get_dirty_pages(inode) >= blk_per_seg)
4082 filemap_fdatawrite(inode->i_mapping);
4083
4084 count -= len;
4085 page_idx += len;
4086 }
4087
4088 if (!ret)
4089 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4090 LLONG_MAX);
4091
4092 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4093
4094 if (ret)
4095 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4096 __func__, ret);
4097out:
4098 inode_unlock(inode);
4099 file_end_write(filp);
4100
4101 return ret;
4102}
4103
4104static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4105{
4106 switch (cmd) {
4107 case FS_IOC_GETVERSION:
4108 return f2fs_ioc_getversion(filp, arg);
4109 case F2FS_IOC_START_ATOMIC_WRITE:
4110 return f2fs_ioc_start_atomic_write(filp);
4111 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4112 return f2fs_ioc_commit_atomic_write(filp);
4113 case F2FS_IOC_START_VOLATILE_WRITE:
4114 return f2fs_ioc_start_volatile_write(filp);
4115 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4116 return f2fs_ioc_release_volatile_write(filp);
4117 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4118 return f2fs_ioc_abort_volatile_write(filp);
4119 case F2FS_IOC_SHUTDOWN:
4120 return f2fs_ioc_shutdown(filp, arg);
4121 case FITRIM:
4122 return f2fs_ioc_fitrim(filp, arg);
4123 case FS_IOC_SET_ENCRYPTION_POLICY:
4124 return f2fs_ioc_set_encryption_policy(filp, arg);
4125 case FS_IOC_GET_ENCRYPTION_POLICY:
4126 return f2fs_ioc_get_encryption_policy(filp, arg);
4127 case FS_IOC_GET_ENCRYPTION_PWSALT:
4128 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4129 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4130 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4131 case FS_IOC_ADD_ENCRYPTION_KEY:
4132 return f2fs_ioc_add_encryption_key(filp, arg);
4133 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4134 return f2fs_ioc_remove_encryption_key(filp, arg);
4135 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4136 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4137 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4138 return f2fs_ioc_get_encryption_key_status(filp, arg);
4139 case FS_IOC_GET_ENCRYPTION_NONCE:
4140 return f2fs_ioc_get_encryption_nonce(filp, arg);
4141 case F2FS_IOC_GARBAGE_COLLECT:
4142 return f2fs_ioc_gc(filp, arg);
4143 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4144 return f2fs_ioc_gc_range(filp, arg);
4145 case F2FS_IOC_WRITE_CHECKPOINT:
4146 return f2fs_ioc_write_checkpoint(filp, arg);
4147 case F2FS_IOC_DEFRAGMENT:
4148 return f2fs_ioc_defragment(filp, arg);
4149 case F2FS_IOC_MOVE_RANGE:
4150 return f2fs_ioc_move_range(filp, arg);
4151 case F2FS_IOC_FLUSH_DEVICE:
4152 return f2fs_ioc_flush_device(filp, arg);
4153 case F2FS_IOC_GET_FEATURES:
4154 return f2fs_ioc_get_features(filp, arg);
4155 case F2FS_IOC_GET_PIN_FILE:
4156 return f2fs_ioc_get_pin_file(filp, arg);
4157 case F2FS_IOC_SET_PIN_FILE:
4158 return f2fs_ioc_set_pin_file(filp, arg);
4159 case F2FS_IOC_PRECACHE_EXTENTS:
4160 return f2fs_ioc_precache_extents(filp, arg);
4161 case F2FS_IOC_RESIZE_FS:
4162 return f2fs_ioc_resize_fs(filp, arg);
4163 case FS_IOC_ENABLE_VERITY:
4164 return f2fs_ioc_enable_verity(filp, arg);
4165 case FS_IOC_MEASURE_VERITY:
4166 return f2fs_ioc_measure_verity(filp, arg);
4167 case FS_IOC_READ_VERITY_METADATA:
4168 return f2fs_ioc_read_verity_metadata(filp, arg);
4169 case FS_IOC_GETFSLABEL:
4170 return f2fs_ioc_getfslabel(filp, arg);
4171 case FS_IOC_SETFSLABEL:
4172 return f2fs_ioc_setfslabel(filp, arg);
4173 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4174 return f2fs_get_compress_blocks(filp, arg);
4175 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4176 return f2fs_release_compress_blocks(filp, arg);
4177 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4178 return f2fs_reserve_compress_blocks(filp, arg);
4179 case F2FS_IOC_SEC_TRIM_FILE:
4180 return f2fs_sec_trim_file(filp, arg);
4181 case F2FS_IOC_GET_COMPRESS_OPTION:
4182 return f2fs_ioc_get_compress_option(filp, arg);
4183 case F2FS_IOC_SET_COMPRESS_OPTION:
4184 return f2fs_ioc_set_compress_option(filp, arg);
4185 case F2FS_IOC_DECOMPRESS_FILE:
4186 return f2fs_ioc_decompress_file(filp, arg);
4187 case F2FS_IOC_COMPRESS_FILE:
4188 return f2fs_ioc_compress_file(filp, arg);
4189 default:
4190 return -ENOTTY;
4191 }
4192}
4193
4194long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4195{
4196 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4197 return -EIO;
4198 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4199 return -ENOSPC;
4200
4201 return __f2fs_ioctl(filp, cmd, arg);
4202}
4203
4204static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4205{
4206 struct file *file = iocb->ki_filp;
4207 struct inode *inode = file_inode(file);
4208 int ret;
4209
4210 if (!f2fs_is_compress_backend_ready(inode))
4211 return -EOPNOTSUPP;
4212
4213 ret = generic_file_read_iter(iocb, iter);
4214
4215 if (ret > 0)
4216 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4217
4218 return ret;
4219}
4220
4221static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4222{
4223 struct file *file = iocb->ki_filp;
4224 struct inode *inode = file_inode(file);
4225 ssize_t ret;
4226
4227 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4228 ret = -EIO;
4229 goto out;
4230 }
4231
4232 if (!f2fs_is_compress_backend_ready(inode)) {
4233 ret = -EOPNOTSUPP;
4234 goto out;
4235 }
4236
4237 if (iocb->ki_flags & IOCB_NOWAIT) {
4238 if (!inode_trylock(inode)) {
4239 ret = -EAGAIN;
4240 goto out;
4241 }
4242 } else {
4243 inode_lock(inode);
4244 }
4245
4246 if (unlikely(IS_IMMUTABLE(inode))) {
4247 ret = -EPERM;
4248 goto unlock;
4249 }
4250
4251 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4252 ret = -EPERM;
4253 goto unlock;
4254 }
4255
4256 ret = generic_write_checks(iocb, from);
4257 if (ret > 0) {
4258 bool preallocated = false;
4259 size_t target_size = 0;
4260 int err;
4261
4262 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4263 set_inode_flag(inode, FI_NO_PREALLOC);
4264
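		/*
		 * With IOCB_NOWAIT, proceed only when this is a pure overwrite
		 * that needs neither block allocation nor buffered fallback;
		 * otherwise return -EAGAIN.
		 */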
4265 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4266 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4267 iov_iter_count(from)) ||
4268 f2fs_has_inline_data(inode) ||
4269 f2fs_force_buffered_io(inode, iocb, from)) {
4270 clear_inode_flag(inode, FI_NO_PREALLOC);
4271 inode_unlock(inode);
4272 ret = -EAGAIN;
4273 goto out;
4274 }
4275 goto write;
4276 }
4277
4278 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4279 goto write;
4280
		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data before a direct write so the
			 * direct I/O path never sees an inline inode.
			 */
			err = f2fs_convert_inline_inode(inode);
4287 if (err)
4288 goto out_err;
4289
			/*
			 * When the write will be forced to the buffered path,
			 * blocks still have to be preallocated below; only an
			 * out-of-place direct write may skip preallocation.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
4295 allow_outplace_dio(inode, iocb, from))
4296 goto write;
4297 }
4298 preallocated = true;
4299 target_size = iocb->ki_pos + iov_iter_count(from);
4300
4301 err = f2fs_preallocate_blocks(iocb, from);
4302 if (err) {
4303out_err:
4304 clear_inode_flag(inode, FI_NO_PREALLOC);
4305 inode_unlock(inode);
4306 ret = err;
4307 goto out;
4308 }
4309write:
4310 ret = __generic_file_write_iter(iocb, from);
4311 clear_inode_flag(inode, FI_NO_PREALLOC);
4312
		/* if we could not write all the data, trim off the preallocated blocks */
		if (preallocated && i_size_read(inode) < target_size) {
4315 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4316 down_write(&F2FS_I(inode)->i_mmap_sem);
4317 f2fs_truncate(inode);
4318 up_write(&F2FS_I(inode)->i_mmap_sem);
4319 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4320 }
4321
4322 if (ret > 0)
4323 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4324 }
4325unlock:
4326 inode_unlock(inode);
4327out:
4328 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4329 iov_iter_count(from), ret);
4330 if (ret > 0)
4331 ret = generic_write_sync(iocb, ret);
4332 return ret;
4333}
4334
4335#ifdef CONFIG_COMPAT
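/*
 * 32-bit layouts of the range ioctls: the u64 members become compat_u64 so
 * the structures match what 32-bit userspace passes in.
 */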
4336struct compat_f2fs_gc_range {
4337 u32 sync;
4338 compat_u64 start;
4339 compat_u64 len;
4340};
4341#define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4342 struct compat_f2fs_gc_range)
4343
4344static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4345{
4346 struct compat_f2fs_gc_range __user *urange;
4347 struct f2fs_gc_range range;
4348 int err;
4349
4350 urange = compat_ptr(arg);
4351 err = get_user(range.sync, &urange->sync);
4352 err |= get_user(range.start, &urange->start);
4353 err |= get_user(range.len, &urange->len);
4354 if (err)
4355 return -EFAULT;
4356
4357 return __f2fs_ioc_gc_range(file, &range);
4358}
4359
4360struct compat_f2fs_move_range {
4361 u32 dst_fd;
4362 compat_u64 pos_in;
4363 compat_u64 pos_out;
4364 compat_u64 len;
4365};
4366#define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4367 struct compat_f2fs_move_range)
4368
4369static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4370{
4371 struct compat_f2fs_move_range __user *urange;
4372 struct f2fs_move_range range;
4373 int err;
4374
4375 urange = compat_ptr(arg);
4376 err = get_user(range.dst_fd, &urange->dst_fd);
4377 err |= get_user(range.pos_in, &urange->pos_in);
4378 err |= get_user(range.pos_out, &urange->pos_out);
4379 err |= get_user(range.len, &urange->len);
4380 if (err)
4381 return -EFAULT;
4382
4383 return __f2fs_ioc_move_range(file, &range);
4384}
4385
4386long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4387{
4388 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4389 return -EIO;
4390 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4391 return -ENOSPC;
4392
4393 switch (cmd) {
4394 case FS_IOC32_GETVERSION:
4395 cmd = FS_IOC_GETVERSION;
4396 break;
4397 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4398 return f2fs_compat_ioc_gc_range(file, arg);
4399 case F2FS_IOC32_MOVE_RANGE:
4400 return f2fs_compat_ioc_move_range(file, arg);
4401 case F2FS_IOC_START_ATOMIC_WRITE:
4402 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4403 case F2FS_IOC_START_VOLATILE_WRITE:
4404 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4405 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4406 case F2FS_IOC_SHUTDOWN:
4407 case FITRIM:
4408 case FS_IOC_SET_ENCRYPTION_POLICY:
4409 case FS_IOC_GET_ENCRYPTION_PWSALT:
4410 case FS_IOC_GET_ENCRYPTION_POLICY:
4411 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4412 case FS_IOC_ADD_ENCRYPTION_KEY:
4413 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4414 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4415 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4416 case FS_IOC_GET_ENCRYPTION_NONCE:
4417 case F2FS_IOC_GARBAGE_COLLECT:
4418 case F2FS_IOC_WRITE_CHECKPOINT:
4419 case F2FS_IOC_DEFRAGMENT:
4420 case F2FS_IOC_FLUSH_DEVICE:
4421 case F2FS_IOC_GET_FEATURES:
4422 case F2FS_IOC_GET_PIN_FILE:
4423 case F2FS_IOC_SET_PIN_FILE:
4424 case F2FS_IOC_PRECACHE_EXTENTS:
4425 case F2FS_IOC_RESIZE_FS:
4426 case FS_IOC_ENABLE_VERITY:
4427 case FS_IOC_MEASURE_VERITY:
4428 case FS_IOC_READ_VERITY_METADATA:
4429 case FS_IOC_GETFSLABEL:
4430 case FS_IOC_SETFSLABEL:
4431 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4432 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4433 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4434 case F2FS_IOC_SEC_TRIM_FILE:
4435 case F2FS_IOC_GET_COMPRESS_OPTION:
4436 case F2FS_IOC_SET_COMPRESS_OPTION:
4437 case F2FS_IOC_DECOMPRESS_FILE:
4438 case F2FS_IOC_COMPRESS_FILE:
4439 break;
4440 default:
4441 return -ENOIOCTLCMD;
4442 }
4443 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4444}
4445#endif
4446
4447const struct file_operations f2fs_file_operations = {
4448 .llseek = f2fs_llseek,
4449 .read_iter = f2fs_file_read_iter,
4450 .write_iter = f2fs_file_write_iter,
4451 .open = f2fs_file_open,
4452 .release = f2fs_release_file,
4453 .mmap = f2fs_file_mmap,
4454 .flush = f2fs_file_flush,
4455 .fsync = f2fs_sync_file,
4456 .fallocate = f2fs_fallocate,
4457 .unlocked_ioctl = f2fs_ioctl,
4458#ifdef CONFIG_COMPAT
4459 .compat_ioctl = f2fs_compat_ioctl,
4460#endif
4461 .splice_read = generic_file_splice_read,
4462 .splice_write = iter_file_splice_write,
4463};
4464