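/*
 * mm/truncate.c - code for taking down pages from address_spaces
 */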
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include "internal.h"

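/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.  It calls the mapping's
 * ->invalidatepage() method, falling back to block_invalidatepage()
 * when the filesystem does not supply one.
 */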
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

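/*
 * This cancels just the dirty bit on the kernel page itself; it does NOT
 * touch dirty bits in any mmaps of the page.  If the mapping accounts
 * dirty pages, the zone, backing_dev and task dirty accounting is
 * unwound as well.
 */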
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

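/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping: this happens either when the VM reclaimed the page while we
 * waited on its lock, or if another thread truncated/invalidated it first.
 */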
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);
	return 0;
}

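/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */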
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

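/*
 * Used to get rid of pages on hardware memory corruption.
 */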
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

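/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */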
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

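/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait, so that as little IO as possible is started in the affected
 * region.  The first pass removes most pages, so the search cost of the
 * second pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful
 * to copy it into a local to avoid races (it could change at any time).
 */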
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	/*
	 * Pass 1: non-blocking.  Skip pages we cannot lock or that are
	 * under writeback; the second pass will deal with them.
	 */
	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Zero out the tail of the page that straddles lstart, if any. */
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	/*
	 * Pass 2: blocking.  Wait for page locks and writeback, and keep
	 * rescanning from the start until the range is really empty.
	 */
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

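/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */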
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

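/**
 * invalidate_mapping_pages - invalidate the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to remove
 * all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */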
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * in this loop, so we have to use a trick to guess
			 * the likely index of the page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

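/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */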
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

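/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */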
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page.
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

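/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */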
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

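/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file size
 * @new: new file size
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem releases
 * resources associated with the freed range (eg. deallocates blocks).  That
 * way, pagecache always stays logically coherent with the on-disk format,
 * and the filesystem does not have to deal with situations such as
 * writepage being called for a page that has already had its underlying
 * blocks deallocated.
 */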
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	if (new < old) {
		struct address_space *mapping = inode->i_mapping;

		/*
		 * unmap_mapping_range is called twice, first simply for
		 * efficiency so that truncate_inode_pages does fewer
		 * single-page unmaps.  However after this first call, and
		 * before truncate_inode_pages finishes, it is possible for
		 * private pages to be COWed, which remain after
		 * truncate_inode_pages finishes, hence the second
		 * unmap_mapping_range call must be made for correctness.
		 */
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(mapping, new);
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	}
}
EXPORT_SYMBOL(truncate_pagecache);

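/**
 * vmtruncate - truncate an inode and unmap mappings "freed" by truncate()
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * Checks the new size, updates i_size, truncates the pagecache and then
 * calls the filesystem's ->truncate() method if one is provided.
 */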
int vmtruncate(struct inode *inode, loff_t offset)
{
	loff_t oldsize;
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;
	oldsize = inode->i_size;
	i_size_write(inode, offset);
	truncate_pagecache(inode, oldsize, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	return error;
}
EXPORT_SYMBOL(vmtruncate);