/*
 * mm/truncate.c - code for taking down pages from address_spaces
 */
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.  We have to release any private
 * data attached to the page before it is removed from the page cache.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
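	/*
	 * If the filesystem provides no ->invalidatepage, fall back to the
	 * generic buffer_head helper for block-device-backed mappings.
	 */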
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
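
/*
 * Zero out the tail of a partially-truncated page and, if the page carries
 * private (filesystem) data, let the filesystem invalidate whatever lies
 * beyond the truncation point.
 */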
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around.  It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated
 * and is not actually mapped anywhere at all.  However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited
 * on its lock, b) when a concurrent invalidate_mapping_pages got there first
 * and c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
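	/*
	 * If the page is mapped into user page tables, zap those mappings
	 * before the page is removed from the page cache, so that no stale
	 * ptes are left behind.
	 */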
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch out regular-file data pages for now; anything else
	 * (directories, device nodes, etc.) would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages; dirty, writeback or mapped pages are
 * left in place.
 *
 * Returns 1 if the page was successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive; must be at the final byte
 *        of a page, e.g. -1 to truncate to EOF)
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if @lstart is not
 * page aligned).
 *
 * Truncation is done in two passes.  The first pass is non-blocking: it
 * skips pages it cannot lock immediately and pages under writeback.  The
 * second pass locks each remaining page and waits for writeback, so every
 * page in the range has been dealt with by the time we return.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
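	/*
	 * First pass: non-blocking.  Pages that cannot be locked without
	 * sleeping, or that are under writeback, are skipped here and picked
	 * up by the blocking pass further down.
	 */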
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}
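
	/*
	 * Second pass: blocking.  Any page missed by the first pass is now
	 * locked unconditionally and its writeback waited for, so nothing
	 * in the range survives.
	 */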
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * invalidate_mapping_pages - invalidate the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes pages that can be dropped without blocking:
 * dirty pages, pages under writeback, mapped pages and pages whose lock
 * cannot be taken immediately are all left in place.  To remove all pages
 * of an inode, use truncate_inode_pages() instead.
 *
 * Returns the number of pages that were invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * page->index is read here even when the trylock
			 * above failed: we are not allowed to sleep on the
			 * lock, and the reference held via the pagevec keeps
			 * the page pinned, so using ->index just to advance
			 * the scan cursor is safe enough.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  invalidate_inode_pages2() needs stronger invalidation
 * guarantees and cannot afford to leave pages behind just because some
 * other path holds a transient reference on them.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;
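
	/*
	 * Re-check the dirty bit under the mapping's tree_lock, so that a
	 * page dirtied after the try_to_release_page() call above is not
	 * thrown away.
	 */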
	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.  Dirty pages are written back via ->launder_page before
 * being dropped.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
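	/*
	 * Look up at most a pagevec's worth of pages per iteration, and
	 * never past @end; stop early if the index wraps around.
	 */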
	while (next <= end && !wrapped &&
	       pagevec_lookup(&pvec, mapping, next,
			      min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem releases
 * resources associated with the freed range (eg. deallocates blocks).  This
 * way, pagecache will always stay logically coherent with on-disk format,
 * and the filesystem would not have to deal with situations such as
 * writepage being called for a page that has already had its underlying
 * blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * unmap_mapping_range is called twice: the first call is simply an
	 * optimisation so that truncate_inode_pages does fewer single-page
	 * unmaps.  However, after that first call and before
	 * truncate_inode_pages finishes, it is possible for private pages
	 * to be COWed, and those can remain mapped after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);
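
	/*
	 * i_size has been updated above, before the pagecache is truncated;
	 * truncate_pagecache() relies on this ordering.
	 */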
	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * Validates the new size with inode_newsize_ok(), updates i_size and the
 * pagecache via truncate_setsize(), and then calls the filesystem's
 * ->truncate method if one is provided.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	truncate_setsize(inode, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);