#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"

static const struct vm_operations_struct ll_file_vm_ops;

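/*
 * Build an LDLM extent policy from a faulting address range: the extent
 * covers the file pages backing [addr, addr + count) within this VMA, with
 * the start rounded down and the end rounded up to page boundaries.
 *
 * For example (hypothetical values), with a 4 KiB page size, vm_pgoff = 4,
 * addr = vm_start + 0x2100 and count = 0x300, the extent becomes
 * [0x6000, 0x6fff] in file byte offsets.
 */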
void policy_from_vma(union ldlm_policy_data *policy,
                     struct vm_area_struct *vma, unsigned long addr,
                     size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

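/*
 * Find the first VMA in @mm overlapping [addr, addr + count) that is backed
 * by a Lustre file (its vm_ops are ll_file_vm_ops) and mapped MAP_SHARED;
 * return NULL if there is none.
 */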
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;

        /* mmap_sem must be held by the caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
             vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        return ret;
}

/*
 * Common fault/mkwrite set-up: initialize a CIT_FAULT cl_io for the page at
 * @index in the file mapped by @vma.  Readahead is disabled for the duration
 * of the fault (VM_SEQ_READ cleared, VM_RAND_READ set); the original
 * readahead flags are saved through @ra_flags so the caller can restore
 * them.  Returns the initialized io, or an ERR_PTR() on failure.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                 pgoff_t index, unsigned long *ra_flags)
{
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct cl_io *io;
        struct cl_fault_io *fio;
        int rc;

        if (ll_file_nolock(file))
                return ERR_PTR(-EOPNOTSUPP);

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * Disable VM_SEQ_READ and use VM_RAND_READ so that the kernel does
         * not read pages beyond those covered by the DLM lock in
         * filemap_fault(); llite does its own readahead in ll_readpage().
         * The original flags are saved in *ra_flags for the caller to
         * restore once the fault completes.
         */
        if (ra_flags)
                *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(vio->vui_cl.cis_io == io);

                /* The mmap lock must be MANDATORY as the io caches pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        return io;
}

/*
 * Core of the page_mkwrite() handler: make @vmpage writable under a
 * CIT_FAULT io with ft_mkwrite set.  Returns 0 with the page locked and
 * dirty on success, -ENODATA if the page was truncated while the io ran,
 * or -EAGAIN (with *retry set) if the page raced with writeback and the
 * caller should try again.
 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio;
        int result;
        u16 refcheck;
        sigset_t set;
        struct inode *inode;
        struct ll_inode_info *lli;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = ll_fault_io_init(env, vma, vmpage->index, NULL);
        if (IS_ERR(io)) {
                result = PTR_ERR(io);
                goto out;
        }

        result = io->ci_result;
        if (result < 0)
                goto out_io;

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma = vma;
        vio->u.fault.ft_vmpage = vmpage;

        /* Allow only SIGKILL and SIGTERM to interrupt the io. */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        cfs_restore_sigs(set);

        if (result == 0) {
                lock_page(vmpage);
                if (!vmpage->mapping) {
                        unlock_page(vmpage);

                        /*
                         * The page was truncated and the lock cancelled
                         * while the io ran.  Return -ENODATA so the caller
                         * reports VM_FAULT_NOPAGE and the fault is retried
                         * from scratch.
                         */
                        result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /*
                         * Race: the page was cleaned (written out) after the
                         * io unlocked it.  It must be re-dirtied through the
                         * cl_io machinery so that it consumes grant space,
                         * so ask the caller to retry.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (!result)
                        set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
        }

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        return result;
}

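/* Translate a cl_io/VFS error code into a VM_FAULT_* status for the MM. */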
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/*
 * Lustre implementation of the fault() method of ll_file_vm_ops.
 *
 * Runs a CIT_FAULT cl_io for the faulting page so that the page is covered
 * by a DLM lock before the lower layers call filemap_fault() to populate it.
 * On success vmf->page holds a referenced page and the fault flags reported
 * by the lower layers are returned; on error the result is converted with
 * to_fault_error().
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio = NULL;
        struct page *vmpage;
        unsigned long ra_flags;
        int result = 0;
        int fault_ret = 0;
        u16 refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
        if (IS_ERR(io)) {
                result = to_fault_error(PTR_ERR(io));
                goto out;
        }

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma = vma;
                vio->u.fault.ft_vmpage = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = false;

                /* May call ll_readpage(), which needs to find this io. */
                ll_cl_add(vma->vm_file, env, io);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /*
                 * ft_flags is only valid if the lower layer actually reached
                 * filemap_fault() and filled it in.
                 */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

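        /* Restore the readahead hints saved by ll_fault_io_init(). */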
        vma->vm_flags |= ra_flags;

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        return fault_ret;
}

static int ll_fault(struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        int result;
        sigset_t set;

        /*
         * Only SIGKILL and SIGTERM may interrupt a fault/mkwrite so that the
         * process stays killable by the administrator, while other signals
         * cannot turn an in-flight fault into a spurious SIGBUS.
         */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
        result = ll_fault0(vmf->vma, vmf);
        LASSERT(!(result & VM_FAULT_LOCKED));
        if (result == 0) {
                struct page *vmpage = vmf->page;

                /* Check whether the page was truncated while unlocked. */
                lock_page(vmpage);
                if (unlikely(!vmpage->mapping)) {
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result = VM_FAULT_LOCKED;
        }
        cfs_restore_sigs(set);
        return result;
}

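/*
 * vm_operations_struct::page_mkwrite() handler: make the faulted page
 * writable, retrying while ll_page_mkwrite0() reports a race with writeback,
 * then map the result onto VM_FAULT_* codes.
 */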
static int ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = vma->vm_file->f_path.dentry;

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EAGAIN:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

        return result;
}

/*
 * Track how many VMAs currently map this object (vvp_object::vob_mmap_cnt);
 * opening a VMA bumps the count, ll_vm_close() below drops it.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
}

/*
 * Dual to ll_vm_open(): drop the mmap count taken when the VMA was opened.
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}

/*
 * Unmap all user mappings of the byte range [first, last] in @mapping.
 * Returns 0 if the mapping had any mapped VMAs, -ENOENT otherwise.
 */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;

        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        return rc;
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault          = ll_fault,
        .page_mkwrite   = ll_page_mkwrite,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
};

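/*
 * file_operations::mmap() handler: refuse mmap for files opened with locking
 * disabled, let generic_file_mmap() set up the mapping, then install
 * ll_file_vm_ops and glimpse the file size so i_size is current before the
 * first fault.
 */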
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        int rc;

        if (ll_file_nolock(file))
                return -EOPNOTSUPP;

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                /* generic_file_mmap() does not call ->open(); do it here. */
                vma->vm_ops->open(vma);

                rc = ll_glimpse_size(inode);
        }

        return rc;
}