1
2
3
4
5
6
7
8#include <linux/mm.h>
9#include <linux/page-flags.h>
10#include <linux/pagemap.h>
11#include <linux/highmem.h>
12#include <linux/sizes.h>
13#include "vfsmod.h"
14
/*
 * A refcounted wrapper around a host-side (shared folders service) open-file
 * handle.  Instances are linked into the owning inode's handle_list
 * (see vboxsf_create_sf_handle()); the final kref_put() closes the host
 * handle via vboxsf_handle_release().
 */
struct vboxsf_handle {
	u64 handle;		/* host shared-folders file handle */
	u32 root;		/* root id of the shared folder (from the sbi) */
	u32 access_flags;	/* SHFL_CF_ACCESS_* flags the file was opened with */
	struct kref refcount;	/* last put closes the host handle and frees us */
	struct list_head head;	/* entry in vboxsf_inode's handle_list */
};
22
23struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
24 u64 handle, u32 access_flags)
25{
26 struct vboxsf_inode *sf_i = VBOXSF_I(inode);
27 struct vboxsf_handle *sf_handle;
28
29 sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
30 if (!sf_handle)
31 return ERR_PTR(-ENOMEM);
32
33
34 sf_i->force_restat = 1;
35
36
37 sf_handle->handle = handle;
38 sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
39 sf_handle->access_flags = access_flags;
40 kref_init(&sf_handle->refcount);
41
42 mutex_lock(&sf_i->handle_list_mutex);
43 list_add(&sf_handle->head, &sf_i->handle_list);
44 mutex_unlock(&sf_i->handle_list_mutex);
45
46 return sf_handle;
47}
48
49static int vboxsf_file_open(struct inode *inode, struct file *file)
50{
51 struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
52 struct shfl_createparms params = {};
53 struct vboxsf_handle *sf_handle;
54 u32 access_flags = 0;
55 int err;
56
57
58
59
60
61
62
63
64
65 params.handle = SHFL_HANDLE_NIL;
66 if (file->f_flags & O_CREAT) {
67 params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
68
69
70
71
72 if (file->f_flags & O_TRUNC)
73 params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
74 else
75 params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
76 } else {
77 params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
78 if (file->f_flags & O_TRUNC)
79 params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
80 }
81
82 switch (file->f_flags & O_ACCMODE) {
83 case O_RDONLY:
84 access_flags |= SHFL_CF_ACCESS_READ;
85 break;
86
87 case O_WRONLY:
88 access_flags |= SHFL_CF_ACCESS_WRITE;
89 break;
90
91 case O_RDWR:
92 access_flags |= SHFL_CF_ACCESS_READWRITE;
93 break;
94
95 default:
96 WARN_ON(1);
97 }
98
99 if (file->f_flags & O_APPEND)
100 access_flags |= SHFL_CF_ACCESS_APPEND;
101
102 params.create_flags |= access_flags;
103 params.info.attr.mode = inode->i_mode;
104
105 err = vboxsf_create_at_dentry(file_dentry(file), ¶ms);
106 if (err == 0 && params.handle == SHFL_HANDLE_NIL)
107 err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
108 if (err)
109 return err;
110
111 sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags);
112 if (IS_ERR(sf_handle)) {
113 vboxsf_close(sbi->root, params.handle);
114 return PTR_ERR(sf_handle);
115 }
116
117 file->private_data = sf_handle;
118 return 0;
119}
120
121static void vboxsf_handle_release(struct kref *refcount)
122{
123 struct vboxsf_handle *sf_handle =
124 container_of(refcount, struct vboxsf_handle, refcount);
125
126 vboxsf_close(sf_handle->root, sf_handle->handle);
127 kfree(sf_handle);
128}
129
130void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle)
131{
132 struct vboxsf_inode *sf_i = VBOXSF_I(inode);
133
134 mutex_lock(&sf_i->handle_list_mutex);
135 list_del(&sf_handle->head);
136 mutex_unlock(&sf_i->handle_list_mutex);
137
138 kref_put(&sf_handle->refcount, vboxsf_handle_release);
139}
140
static int vboxsf_file_release(struct inode *inode, struct file *file)
{
	/*
	 * Flush all dirty pages before dropping the handle, so that the
	 * host sees everything written through this file before the
	 * host-side handle is (potentially) closed by the final kref_put.
	 */
	filemap_write_and_wait(inode->i_mapping);

	/* private_data was set to our vboxsf_handle in vboxsf_file_open() */
	vboxsf_release_sf_handle(inode, file->private_data);
	return 0;
}
152
153
154
155
156
/*
 * Write back any dirty pages when a mapping is torn down, so changes made
 * through the mapping reach the host promptly.
 */
static void vboxsf_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
161
/* Page-cache-backed mmap; only .close is vboxsf-specific (flush on unmap). */
static const struct vm_operations_struct vboxsf_file_vm_ops = {
	.close = vboxsf_vma_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
};
167
168static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
169{
170 int err;
171
172 err = generic_file_mmap(file, vma);
173 if (!err)
174 vma->vm_ops = &vboxsf_file_vm_ops;
175
176 return err;
177}
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * File operations for regular files.  Reads and writes go through the
 * generic page-cache paths; the vboxsf-specific work happens in open,
 * release, mmap, and the address-space operations below.
 */
const struct file_operations vboxsf_reg_fops = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = vboxsf_file_mmap,
	.open = vboxsf_file_open,
	.release = vboxsf_file_release,
	/* noop_fsync: NOTE(review) — presumably data reaches the host via
	 * write_end/writepage, so fsync has nothing extra to do; confirm. */
	.fsync = noop_fsync,
	.splice_read = generic_file_splice_read,
};
222
/* Inode operations for regular files (getattr/setattr defined elsewhere). */
const struct inode_operations vboxsf_reg_iops = {
	.getattr = vboxsf_getattr,
	.setattr = vboxsf_setattr
};
227
228static int vboxsf_readpage(struct file *file, struct page *page)
229{
230 struct vboxsf_handle *sf_handle = file->private_data;
231 loff_t off = page_offset(page);
232 u32 nread = PAGE_SIZE;
233 u8 *buf;
234 int err;
235
236 buf = kmap(page);
237
238 err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
239 if (err == 0) {
240 memset(&buf[nread], 0, PAGE_SIZE - nread);
241 flush_dcache_page(page);
242 SetPageUptodate(page);
243 } else {
244 SetPageError(page);
245 }
246
247 kunmap(page);
248 unlock_page(page);
249 return err;
250}
251
252static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
253{
254 struct vboxsf_handle *h, *sf_handle = NULL;
255
256 mutex_lock(&sf_i->handle_list_mutex);
257 list_for_each_entry(h, &sf_i->handle_list, head) {
258 if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
259 h->access_flags == SHFL_CF_ACCESS_READWRITE) {
260 kref_get(&h->refcount);
261 sf_handle = h;
262 break;
263 }
264 }
265 mutex_unlock(&sf_i->handle_list_mutex);
266
267 return sf_handle;
268}
269
/*
 * Write one dirty page back to the host.  Writeback has no file of its
 * own, so we borrow a write-capable handle from the inode's open-handle
 * list; if none exists we fail with -EBADF (NOTE(review): this can
 * presumably happen when a mapping outlives all writable opens — confirm).
 */
static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct vboxsf_inode *sf_i = VBOXSF_I(inode);
	struct vboxsf_handle *sf_handle;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	u32 nwrite = PAGE_SIZE;
	u8 *buf;
	int err;

	/* last (partial) page: only write up to i_size */
	if (off + PAGE_SIZE > size)
		nwrite = size & ~PAGE_MASK;

	sf_handle = vboxsf_get_write_handle(sf_i);
	if (!sf_handle)
		return -EBADF;

	buf = kmap(page);
	err = vboxsf_write(sf_handle->root, sf_handle->handle,
			   off, &nwrite, buf);
	kunmap(page);

	/* drop the borrowed reference; last put closes the host handle */
	kref_put(&sf_handle->refcount, vboxsf_handle_release);

	if (err == 0) {
		ClearPageError(page);
		/* host-side attrs changed; refetch them on the next stat */
		sf_i->force_restat = 1;
	} else {
		ClearPageUptodate(page);
	}

	unlock_page(page);
	return err;
}
306
/*
 * Write-through .write_end: push the just-copied bytes straight to the
 * host instead of merely dirtying the page.  Paired with
 * simple_write_begin in vboxsf_reg_aops.  Returns the number of bytes
 * written (0 on error), per the write_end contract.
 */
static int vboxsf_write_end(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int copied,
			    struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct vboxsf_handle *sf_handle = file->private_data;
	unsigned int from = pos & ~PAGE_MASK;
	u32 nwritten = len;
	u8 *buf;
	int err;

	/* zero the stale tail if the copy from userspace was short */
	if (!PageUptodate(page) && copied < len)
		zero_user(page, from + copied, len - copied);

	buf = kmap(page);
	err = vboxsf_write(sf_handle->root, sf_handle->handle,
			   pos, &nwritten, buf + from);
	kunmap(page);

	if (err) {
		/* report zero bytes written so the caller retries/errors */
		nwritten = 0;
		goto out;
	}

	/* host-side attrs changed; refetch them on the next stat */
	VBOXSF_I(inode)->force_restat = 1;

	/* a full-page write makes the whole page valid */
	if (!PageUptodate(page) && nwritten == PAGE_SIZE)
		SetPageUptodate(page);

	/* extend i_size if we wrote past the current end of file */
	pos += nwritten;
	if (pos > inode->i_size)
		i_size_write(inode, pos);

out:
	unlock_page(page);
	put_page(page);

	return nwritten;
}
348
349
350
351
352
353
/*
 * Address-space operations: reads fill the page cache from the host;
 * writes go through simple_write_begin + our write-through write_end,
 * with writepage handling mmap-dirtied pages.
 */
const struct address_space_operations vboxsf_reg_aops = {
	.readpage = vboxsf_readpage,
	.writepage = vboxsf_writepage,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.write_begin = simple_write_begin,
	.write_end = vboxsf_write_end,
};
361
362static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
363 struct delayed_call *done)
364{
365 struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
366 struct shfl_string *path;
367 char *link;
368 int err;
369
370 if (!dentry)
371 return ERR_PTR(-ECHILD);
372
373 path = vboxsf_path_from_dentry(sbi, dentry);
374 if (IS_ERR(path))
375 return ERR_CAST(path);
376
377 link = kzalloc(PATH_MAX, GFP_KERNEL);
378 if (!link) {
379 __putname(path);
380 return ERR_PTR(-ENOMEM);
381 }
382
383 err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
384 __putname(path);
385 if (err) {
386 kfree(link);
387 return ERR_PTR(err);
388 }
389
390 set_delayed_call(done, kfree_link, link);
391 return link;
392}
393
/* Inode operations for symlinks. */
const struct inode_operations vboxsf_lnk_iops = {
	.get_link = vboxsf_get_link
};
397