// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* for block_invalidatepage() */

#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
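/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */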
static inline void __clear_shadow_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}
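/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */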
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}
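/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */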
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
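/*
 * Invalidate exceptional entry if clearing the entry needs to be done in sync
 * with the filesystem, which is the DAX case. Returns 1 if the entry was
 * invalidated, 0 otherwise.
 */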
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
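/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */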
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
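/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We operate under the page lock here.
 */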
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
		unmap_mapping_pages(mapping, page->index, nr, false);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}
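/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */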
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(mapping, page);
	delete_from_page_cache(page);
	return 0;
}
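/*
 * Used to get rid of pages on hardware memory corruption.
 */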
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
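/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */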
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
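/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */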
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(mapping, locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
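/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can still be a page in the process
 * of deletion (inside __delete_from_page_cache()) in the specified range.
 */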
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
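/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */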
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
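/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Return: the number of the pages that were invalidated
 */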
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP. Don't
				 * invalidate the page as the part outside of
				 * 'end' could be still useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
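/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */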
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
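/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */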
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_pages(mapping, index,
						1, false);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
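/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */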
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
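/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */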
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
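/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will be typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */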
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
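/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */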
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
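/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */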
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);