/*
 * file.c - NILFS regular file handling primitives including fsync()
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Amagai Yoshiji and Ryusuke Konishi.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include "nilfs.h"
#include "segment.h"

int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * Called from the fsync() system call.
	 * This is the only entry point that can catch write and sync
	 * timing for both data blocks and intermediate blocks.
	 */
	struct the_nilfs *nilfs;
	struct inode *inode = file->f_mapping->host;
	int err = 0;

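	/*
	 * Only write when the inode carries dirty state.  A datasync
	 * request can be served by logging just this inode's dirty
	 * data blocks; a full fsync constructs a complete segment
	 * covering metadata as well.
	 */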
	if (nilfs_inode_dirty(inode)) {
		if (datasync)
			err = nilfs_construct_dsync_segment(inode->i_sb, inode,
							    start, end);
		else
			err = nilfs_construct_segment(inode->i_sb);
	}

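	/*
	 * Follow up with a cache flush on the underlying device so the
	 * written log is durable across a power failure even when the
	 * device has a volatile write cache.
	 */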
	nilfs = inode->i_sb->s_fs_info;
	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}

static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct nilfs_transaction_info ti;
	int ret = 0;

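	/*
	 * Mapped writes cannot fail with -ENOSPC at write time, so
	 * refuse the fault up front when the filesystem is nearly
	 * full instead of overcommitting log space.
	 */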
	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		ret = -EFAULT;	/* make the VM retry the fault */
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

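	/*
	 * Scan the page's buffer heads; if every block is already
	 * mapped to disk, no hole needs to be filled and the fault
	 * can be served without starting a transaction.
	 */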
	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	/*
	 * fill hole blocks
	 */
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* never returns -ENOMEM, but may return -ENOSPC */
	if (unlikely(ret))
		goto out;

	file_update_time(vma->vm_file);
	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret) {
		nilfs_transaction_abort(inode->i_sb);
		goto out;
	}
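	/*
	 * Mark all blocks backing this page dirty;
	 * PAGE_SHIFT - i_blkbits is the log2 of the number of
	 * filesystem blocks per page.
	 */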
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);	/* never fails */

 mapped:
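	/*
	 * Writeback may still be in flight for this page; wait until
	 * it can be safely redirtied so that devices requiring stable
	 * pages during I/O see consistent contents.
	 */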
	wait_for_stable_page(page);
 out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct nilfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= nilfs_page_mkwrite,
};

static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &nilfs_file_vm_ops;
	return 0;
}

/*
 * We have mostly NULL's here: the current defaults are ok for
 * the nilfs filesystem.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	/* .release	= nilfs_release_file, */
	.fsync		= nilfs_sync_file,
	.splice_read	= generic_file_splice_read,
};

const struct inode_operations nilfs_file_inode_operations = {
	.setattr	= nilfs_setattr,
	.permission	= nilfs_permission,
	.fiemap		= nilfs_fiemap,
};