/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
				   do_invalidatepage() */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
			     workingset_update_node, mapping);
	mapping->nrexceptional--;
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/*
 * Unconditionally remove exceptional entry. Usually called from truncate path.
 */
static void truncate_exceptional_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping)) {
		dax_delete_mapping_entry(mapping, index);
		return;
	}
	clear_shadow_entry(mapping, index, entry);
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
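
/*
 * Illustrative sketch (not code from this file; the "foo" names are
 * hypothetical): a filesystem that keeps per-page private data wires up
 * ->invalidatepage in its address_space_operations, e.g.
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * Leaving the hook NULL gets the same block_invalidatepage() default when
 * CONFIG_BLOCK is enabled, as seen above.
 */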

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping: the page was truncated or released behind our back, and there is
 * nothing left for this instance to do.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unmapped pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

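/*
 * Safely truncate one page from its pagecache mapping, unmapping it from
 * userspace first if it is mapped.  The page must be locked by the caller;
 * a transparent huge page is unmapped in its entirety.
 */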
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	loff_t holelen;

	VM_BUG_ON_PAGE(PageTail(page), page);

	holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_SHIFT,
				    holelen, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
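
/*
 * Illustrative sketch (an assumption about callers, not code from this
 * file): the memory-failure path reaches this helper through the
 * ->error_remove_page hook, e.g.
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */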

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected
 * region.  The first pass will remove most pages, so the search cost of
 * the second pass is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
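
/*
 * Illustrative sketch (an assumption about callers, not code from this
 * file): dropping every cached page of a block device's mapping looks like
 *
 *	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
 *
 * i.e. lstart == 0 truncates the whole mapping.
 */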

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
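
/*
 * Illustrative sketch (hypothetical "foo" filesystem, names invented for
 * illustration): the final truncate is typically issued from
 * ->evict_inode():
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */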

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages which were successfully invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP: keep the
				 * page, as the part outside of 'end' may
				 * still be useful.  Drop the lock taken
				 * above before moving on.
				 */
				if (index == round_down(end, HPAGE_PMD_NR)) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
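
/*
 * Illustrative sketch (an assumption about callers, not code from this
 * file): this is the workhorse behind interfaces that drop clean cache
 * opportunistically, e.g. POSIX_FADV_DONTNEED:
 *
 *	invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * Dirty, locked or mapped pages are simply skipped, so callers that need
 * a guarantee must use invalidate_inode_pages2_range() below.
 */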

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	return 0;
}

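/*
 * Write back a dirty page before invalidation, using the filesystem's
 * ->launder_page method if it provides one.  Returns 0 on success or when
 * there is nothing to do, otherwise the error from ->launder_page.
 */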
static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)index << PAGE_SHIFT,
					    (loff_t)(1 + end - index)
							<< PAGE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					    (loff_t)index << PAGE_SHIFT,
					    PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	/*
	 * For DAX we invalidate page tables after invalidating radix tree.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether radix tree entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
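
/*
 * Illustrative sketch (an assumption about callers, not code from this
 * file): direct I/O relies on this stronger invalidation to keep the page
 * cache coherent with data written around it, roughly:
 *
 *	invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_SHIFT, (pos + len - 1) >> PAGE_SHIFT);
 *
 * A -EBUSY return tells the caller that some page could not be dropped.
 */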

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
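
/*
 * Illustrative only: callers must publish the new size first, e.g.
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, newsize);
 *
 * truncate_setsize() below wraps exactly this pattern.
 */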

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
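
/*
 * Illustrative sketch (hypothetical "foo" filesystem): the typical
 * ->setattr path is
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		...filesystem-specific checks...
 *		truncate_setsize(inode, attr->ia_size);
 *		foo_truncate_blocks(inode, attr->ia_size);
 *	}
 */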

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from was already fully mapped? Nothing to do. */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
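
/*
 * Worked example (illustrative numbers only): with a 1k block size and 4k
 * pages, extending i_size from 5000 to 9000 gives rounded_from == 5120,
 * which is neither >= @to nor page aligned, so the page covering bytes
 * 4096-8191 (the one straddling the old i_size) is write-protected via
 * page_mkclean(); the next mmap store into it must go through
 * ->page_mkwrite() so the filesystem can allocate the new blocks first.
 */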

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;

	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
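
/*
 * Illustrative sketch (an assumption about callers, not code from this
 * file): hole punching via fallocate(FALLOC_FL_PUNCH_HOLE) typically does
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *
 * before deallocating the underlying blocks.
 */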