// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 */
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
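
/*
 * Regular page slots are stabilized by the page lock even without the
 * xarray itself locked.  Shadow entries carry no such protection, so they
 * must be re-verified under the xarray lock before being cleared.
 */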
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
}
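
/*
 * Unconditionally remove exceptional entries.  Usually called from the
 * truncate path.  Note that the pagevec may be altered by this function by
 * removing exceptional entries, similar to pagevec_remove_exceptionals().
 */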
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	if (!dax) {
		spin_lock(&mapping->host->i_lock);
		xa_lock_irq(&mapping->i_pages);
	}

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (!dax) {
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	pvec->nr = j;
}
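
/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */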
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
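
/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */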
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
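
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * range.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */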
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
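
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 */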
static void truncate_cleanup_page(struct page *page)
{
	if (page_mapped(page))
		unmap_mapping_page(page);

	if (page_has_private(page))
		do_invalidatepage(page, 0, thp_size(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}
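
/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */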
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(page);
	delete_from_page_cache(page);
	return 0;
}
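
/*
 * Used to get rid of pages on hardware memory corruption.
 */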
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
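
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */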
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
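
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * Note that since ->invalidatepage() accepts range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */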
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t start;
	pgoff_t end;
	unsigned int partial_start;
	unsigned int partial_end;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index;
	int i;

	if (mapping_empty(mapping))
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		index = indices[pagevec_count(&pvec) - 1] + 1;
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		for (i = 0; i < pagevec_count(&pvec); i++)
			truncate_cleanup_page(pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &pvec);
		for (i = 0; i < pagevec_count(&pvec); i++)
			unlock_page(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}

	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone by truncate */
			index = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
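
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */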
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
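
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */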
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);

static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (find_lock_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				count += invalidate_exceptional_entry(mapping,
								      index,
								      page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_page(page);
				/* It is likely on the pagevec of a remote CPU */
				if (nr_pagevec)
					(*nr_pagevec)++;
			}
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
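
/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: the number of the cache entries that were invalidated
 */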
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
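
/**
 * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_pagevec: invalidate failed page number for caller
 *
 * This helper is similar to invalidate_mapping_pages(), except that it
 * accounts for pages that are likely on a pagevec and counts them in
 * @nr_pagevec, which will be used by the caller.
 */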
void invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	__invalidate_mapping_pages(mapping, start, end, nr_pagevec);
}
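
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */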
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	spin_unlock(&mapping->host->i_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
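
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */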
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (find_get_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && page_mapped(page)) {
				/*
				 * If page is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
				did_range_unmap = 1;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);

			if (page_mapped(page))
				unmap_mapping_page(page);
			BUG_ON(page_mapped(page));

			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
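
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */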
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
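
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */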
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
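
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */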
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
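
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */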
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;

	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
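
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */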
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;

	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);