1
2
3
4
5
6
7
8
9
10#include <linux/kernel.h>
11#include <linux/backing-dev.h>
12#include <linux/dax.h>
13#include <linux/gfp.h>
14#include <linux/mm.h>
15#include <linux/swap.h>
16#include <linux/export.h>
17#include <linux/pagemap.h>
18#include <linux/highmem.h>
19#include <linux/pagevec.h>
20#include <linux/task_io_accounting_ops.h>
21#include <linux/buffer_head.h>
22
23#include <linux/shmem_fs.h>
24#include <linux/cleancache.h>
25#include <linux/rmap.h>
26#include "internal.h"
27
28
29
30
31
32
/*
 * Remove a shadow (exceptional) entry at @index, but only if the slot still
 * holds @entry.  Regular page slots are stabilized by the page lock, but
 * shadow entries have no such protection, so the expected value must be
 * re-verified under the tree lock.  Caller holds mapping->i_pages lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	/* Keep workingset node accounting in sync as nodes change. */
	xas_set_update(&xas, workingset_update_node);
	/* Somebody else already replaced or removed the entry: nothing to do. */
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
}
44
/*
 * Locked variant of __clear_shadow_entry(): takes and releases the
 * i_pages lock around the removal.
 */
static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}
52
53
54
55
56
57
/*
 * Unconditionally remove exceptional (shadow/DAX) entries found in @pvec
 * whose index is below @end.  Usually called from the truncate path.
 *
 * Note that the pagevec may be altered by this function: exceptional
 * entries are removed from it (similar to pagevec_remove_exceptionals()),
 * leaving only the real pages behind for the caller to release.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	/* Find the first exceptional entry; bail if there is none. */
	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	/*
	 * DAX entries are deleted via dax_delete_mapping_entry() which takes
	 * its own locks, so only take the tree lock for the shadow-entry case,
	 * and only when at least one entry is actually inside the range.
	 */
	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		/* Real pages are compacted to the front of the pagevec. */
		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		/* Exceptional entries beyond the range are simply dropped. */
		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		xa_unlock_irq(&mapping->i_pages);
	/* Shrink the pagevec to the surviving real pages. */
	pvec->nr = j;
}
105
106
107
108
109
110static int invalidate_exceptional_entry(struct address_space *mapping,
111 pgoff_t index, void *entry)
112{
113
114 if (shmem_mapping(mapping) || dax_mapping(mapping))
115 return 1;
116 clear_shadow_entry(mapping, index, entry);
117 return 1;
118}
119
120
121
122
123
/*
 * Invalidate an exceptional entry if it is clean.  This handles exceptional
 * entries for invalidate_inode_pages2(), so for DAX it evicts only clean
 * entries (via dax_invalidate_mapping_entry_sync(), whose return value is
 * propagated).  Returns 1 on success/skip, 0 if a DAX entry was busy.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151void do_invalidatepage(struct page *page, unsigned int offset,
152 unsigned int length)
153{
154 void (*invalidatepage)(struct page *, unsigned int, unsigned int);
155
156 invalidatepage = page->mapping->a_ops->invalidatepage;
157#ifdef CONFIG_BLOCK
158 if (!invalidatepage)
159 invalidatepage = block_invalidatepage;
160#endif
161 if (invalidatepage)
162 (*invalidatepage)(page, offset, length);
163}
164
165
166
167
168
169
170
171
172
173
174
/*
 * Prepare a locked page for removal from the page cache: unmap it from
 * user pagetables, strip fs-private metadata, and cancel any dirtiness.
 *
 * NOTE(review): callers still check page->mapping themselves; this helper
 * does not revalidate it.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		/* A transparent huge page covers HPAGE_PMD_NR indices. */
		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
		unmap_mapping_pages(mapping, page->index, nr, false);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}
194
195
196
197
198
199
200
201
202
203static int
204invalidate_complete_page(struct address_space *mapping, struct page *page)
205{
206 int ret;
207
208 if (page->mapping != mapping)
209 return 0;
210
211 if (page_has_private(page) && !try_to_release_page(page, 0))
212 return 0;
213
214 ret = remove_mapping(mapping, page);
215
216 return ret;
217}
218
/*
 * Remove @page from @mapping: unmap it, strip private data/dirtiness
 * (via truncate_cleanup_page()) and delete it from the page cache.
 * The page must be locked and must not be a THP tail page.
 *
 * Returns 0 on success, -EIO if the page no longer belongs to @mapping
 * (e.g. it was reclaimed while we waited for the lock).
 */
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(mapping, page);
	delete_from_page_cache(page);
	return 0;
}
230
231
232
233
/*
 * Generic ->error_remove_page() implementation: used to get rid of pages
 * on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
247
248
249
250
251
252
253
/*
 * Safely invalidate one page from its pagecache mapping.
 * Only clean, unused pages are dropped; dirty, under-writeback or
 * still-mapped pages are left alone.
 *
 * Returns 1 if the page was successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page) || page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t start;			/* first fully-truncated page, inclusive */
	pgoff_t end;			/* one past the last fully-truncated page */
	unsigned int partial_start;	/* byte offset within the first page */
	unsigned int partial_end;	/* byte length within the last page */
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index;
	int i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	/* Pass 1: non-blocking - skip pages we cannot trylock. */
	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			/* Exceptional entries are handled in bulk below. */
			if (xa_is_value(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		/* Clean up, then delete the whole batch in one go. */
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(mapping, locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/* Zero the truncated tail of the first partial page, if any. */
	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	/* Zero the head of the last partial page, if any. */
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	/* Pass 2: blocking - wait for locks and writeback, retry until empty. */
	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
460
461
462
463
464
465
466
467
468
469
470
471
472
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Convenience wrapper around truncate_inode_pages_range() that truncates
 * everything from @lstart to end-of-file (lend == -1).
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
478
479
480
481
482
483
484
485
486
487
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Final truncate of a dying inode: marks the mapping as exiting, then
 * removes every remaining page and shadow entry.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Tell reclaim that the address space is exiting, so that it
	 * does not install eviction (shadow) information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * Read nrpages before nrexceptional, separated by smp_rmb().
	 * NOTE(review): this presumably pairs with a write barrier on the
	 * side that installs shadow entries (increment nrexceptional,
	 * then decrement nrpages), so that at least one of the two reads
	 * below is non-zero if anything remains - confirm against the
	 * workingset/reclaim code.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * Cycle the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Unconditionally truncate: cleancache_invalidate_inode() inside
	 * needs to run even if no pages or shadow entries remain.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to remove
 * all the pages of one inode, you must call truncate_inode_pages().
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Return: the number of pages that were invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				/* Advance past the remaining subpages. */
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of a THP.  Don't
				 * invalidate the page as the part outside of
				 * 'end' could still be useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
614
615
616
617
618
619
620
621
/*
 * This is like invalidate_complete_page(), except it removes the page
 * from the cache directly under the tree lock instead of going through
 * remove_mapping().  The page must be locked and clean.
 *
 * Returns 1 if the page was invalidated, 0 if it could not be (mapping
 * changed, private data could not be released, or the page is dirty).
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	/* Re-check dirtiness under the tree lock before deleting. */
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	/* Drop the page-cache reference. */
	put_page(page);
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}
650
651static int do_launder_page(struct address_space *mapping, struct page *page)
652{
653 if (!PageDirty(page))
654 return 0;
655 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
656 return 0;
657 return mapping->a_ops->launder_page(page);
658}
659
660
661
662
663
664
665
666
667
668
669
670
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.  Dirty pages are laundered via ->launder_page first.
 *
 * Return: -EBUSY if any pages could not be invalidated, or the last
 * negative error from laundering; 0 on full success.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_pages(mapping, index,
								1, false);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive.  And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
758
759
760
761
762
763
764
765
766
767
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Wrapper around invalidate_inode_pages2_range() covering the whole
 * mapping (0 .. -1).
 *
 * Return: -EBUSY if any pages could not be invalidated, 0 otherwise.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
808
809
810
811
812
813
814
815
816
817
818
819
820
821
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  When the file grows, pagecache_isize_extended()
 * is called first so partial-block zeroing is handled before the new size
 * becomes visible via truncate_pagecache().
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  The page straddling the old size
 * is marked clean in user pagetables (via page_mkclean()) so that a later
 * write through an mmap triggers ->page_mkwrite() again and the filesystem
 * can zero/allocate the newly exposed part of the block.
 *
 * No-op when the block size equals the page size, or when the old size was
 * already block-aligned, since then the page cannot straddle a block
 * boundary at the old EOF.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Check whether the size change crosses a block boundary mid-page. */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
883
884
885
886
887
888
889
890
891
892
893
894
895
896
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
921