1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/fs.h>
25#include <linux/mm.h>
26#include <linux/writeback.h>
27#include "nilfs.h"
28#include "segment.h"
29
30int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
31{
32
33
34
35
36
37
38
39
40 struct inode *inode = file->f_mapping->host;
41 int err;
42
43 err = filemap_write_and_wait_range(inode->i_mapping, start, end);
44 if (err)
45 return err;
46 mutex_lock(&inode->i_mutex);
47
48 if (!nilfs_inode_dirty(inode)) {
49 mutex_unlock(&inode->i_mutex);
50 return 0;
51 }
52
53 if (datasync)
54 err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
55 LLONG_MAX);
56 else
57 err = nilfs_construct_segment(inode->i_sb);
58
59 mutex_unlock(&inode->i_mutex);
60 return err;
61}
62
/*
 * nilfs_page_mkwrite - handle a write fault on a shared-mmapped page.
 *
 * Bails out with SIGBUS when the filesystem is near full, validates that
 * the faulted page still belongs to this file, and ensures the page's
 * blocks are allocated (filling holes inside a transaction) before
 * letting the write proceed.  Returns a VM_FAULT_* code; on the success
 * path the page is returned locked (VM_FAULT_LOCKED).
 */
static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct nilfs_transaction_info ti;
	int ret;

	/* Refuse new block allocation when the disk is almost full */
	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS; /* -ENOSPC */

	lock_page(page);
	/*
	 * The page may have been truncated or invalidated while we were
	 * waiting for the lock; if so, drop it and let the VM retry.
	 */
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Check to see if the page is already mapped to disk (no holes);
	 * if so, no block allocation is needed and we can skip the
	 * transaction entirely.
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* Walk the page's buffers: mapped everywhere means no holes */
	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			/* Cache the result so the walk is skipped next time */
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	/*
	 * Fill hole blocks: allocate the missing disk blocks inside a
	 * nilfs transaction so the allocation is logged atomically.
	 */
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* On failure (e.g. -ENOSPC) the transaction was not started */
	if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret != VM_FAULT_LOCKED) {
		nilfs_transaction_abort(inode->i_sb);
		return ret;
	}
	/* Account one page worth of dirty blocks against this inode */
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

 mapped:
	/* Stabilize the page before the caller marks it writable */
	wait_on_page_writeback(page);
	return VM_FAULT_LOCKED;
}
125
/*
 * VM operations for nilfs2 file mappings: generic read faults, plus the
 * nilfs-specific write-fault hook that allocates blocks transactionally.
 */
static const struct vm_operations_struct nilfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= nilfs_page_mkwrite,
};
130
131static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
132{
133 file_accessed(file);
134 vma->vm_ops = &nilfs_file_vm_ops;
135 vma->vm_flags |= VM_CAN_NONLINEAR;
136 return 0;
137}
138
139
140
141
142
/*
 * File operations for nilfs2 regular files.  Mostly generic VFS helpers;
 * the nilfs-specific pieces are the ioctl handlers, the mmap hook (for
 * transactional write faults), and the fsync implementation.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	/* .release is intentionally absent: nothing to tear down per-open */
	.fsync		= nilfs_sync_file,
	.splice_read	= generic_file_splice_read,
};
159
/*
 * Inode operations for nilfs2 regular files: truncate/setattr/permission
 * plus fiemap for extent mapping queries.
 */
const struct inode_operations nilfs_file_inode_operations = {
	.truncate	= nilfs_truncate,
	.setattr	= nilfs_setattr,
	.permission	= nilfs_permission,
	.fiemap		= nilfs_fiemap,
};
166
167
168