/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 *	Author: Carsten Otte
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
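
/*
 * We use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero.
 */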
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;
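
/*
 * Lazily allocate the single zeroed page used to back sparse
 * (not yet allocated) file ranges.
 */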
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
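
/*
 * This is the file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file * is not used at all.  It may be NULL.
 */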
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/*
		 * If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Okay, we have the memory, so now copy it to user space.
		 * Sparse pages are cleared in the user buffer instead.
		 */
		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}
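
/*
 * read() for execute in place files: copy straight from the backing
 * memory, no page cache involved.
 */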
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
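
/*
 * __xip_unmap is invoked from xip_file_fault() and __xip_file_write().
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page where it is mapped at pgoff.
 */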
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}
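
/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute
 * in place.
 */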
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		/* not shared and writable, use the sparse zero page */
		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;

		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault = xip_file_fault,
	.page_mkwrite = filemap_page_mkwrite,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
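
/*
 * Copy data from user space straight into the file's backing memory,
 * allocating new blocks via ->get_xip_mem() where the file is still
 * sparse.  Called from xip_file_write() with i_mutex held.
 */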
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block and unmap it */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
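
/*
 * Truncate a page used for execute in place: zero the part of the last
 * block beyond the new end of file.  Analogous to block_truncate_page(),
 * but uses ->get_xip_mem() instead of the page cache.
 */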
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);