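/*
 * mm/truncate.c - code for taking down pages from address_spaces
 */
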
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrshadows--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track nodes without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes, &node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

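/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  The caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */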
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

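/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens either because an exclusive hold on the page lock
 * was taken or because a truncate/invalidate raced with us.
 */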
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence the dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}
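
/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */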
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_CACHE_SHIFT,
				    PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

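/*
 * Used to get rid of pages on hardware memory corruption.
 */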
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
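
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */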
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

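/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */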
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t start;			/* inclusive */
	pgoff_t end;			/* exclusive */
	unsigned int partial_start;	/* inclusive */
	unsigned int partial_end;	/* exclusive */
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index;
	int i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
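
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */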
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
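
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */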
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Mark the address space as exiting.  Page reclaim checks this
	 * flag and, for a dying inode, does not bother leaving shadow
	 * entries behind for pages it evicts from this mapping.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
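
/**
 * invalidate_mapping_pages - invalidate the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */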
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest; try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
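
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */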
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL, memcg);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

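/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */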
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)index << PAGE_CACHE_SHIFT,
					    (loff_t)(1 + end - index)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					    (loff_t)index << PAGE_CACHE_SHIFT,
					    PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
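
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */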
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
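
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */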
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
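
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */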
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
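
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way the filesystem can be sure that page_mkwrite() is
 * called on the page before user writes to the page via mmap after the
 * i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new size.
 */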
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;

	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
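
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */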
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);