// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	if (!dax)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (!dax)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
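
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * invalidates a page's private state before dropping the page from the
 * cache, much as truncate_cleanup_page() below does.  The example_* name
 * is hypothetical.
 */
static void __maybe_unused example_invalidate_whole_page(struct page *page)
{
	/* Caller must hold the page lock and know page->mapping is valid. */
	if (page_has_private(page))
		do_invalidatepage(page, 0, thp_size(page));
}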

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_page(struct page *page)
{
	if (page_mapped(page))
		unmap_mapping_page(page);

	if (page_has_private(page))
		do_invalidatepage(page, 0, thp_size(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
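
/*
 * Illustrative sketch (not part of the original file): invalidate_inode_page()
 * expects a locked page, so an opportunistic caller would do something like
 * this.  The example_* name is hypothetical.
 */
static int __maybe_unused example_try_drop_page(struct page *page)
{
	int ret = 0;

	if (trylock_page(page)) {
		ret = invalidate_inode_page(page);
		unlock_page(page);
	}
	return ret;	/* 1 if invalidated, 0 if it was busy/dirty/mapped */
}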

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping_empty(mapping))
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		index = indices[pagevec_count(&pvec) - 1] + 1;
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		for (i = 0; i < pagevec_count(&pvec); i++)
			truncate_cleanup_page(pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &pvec);
		for (i = 0; i < pagevec_count(&pvec); i++)
			unlock_page(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);

		if (page) {
			unsigned int top = PAGE_SIZE;

			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);

		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
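
/*
 * Illustrative sketch (not part of the original file): dropping the cache
 * over a single page-sized range, with an inclusive last byte as the API
 * expects.  The example_* name is hypothetical.
 */
static void __maybe_unused example_truncate_one_page(struct inode *inode,
						     loff_t pos)
{
	truncate_inode_pages_range(inode->i_mapping, pos,
				   pos + PAGE_SIZE - 1);
}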

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
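
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->evict_inode() is the expected caller of truncate_inode_pages_final().
 * The example_* name is hypothetical.
 */
static void __maybe_unused example_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}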

static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (find_lock_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				count += invalidate_exceptional_entry(mapping,
								      index,
								      page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_page(page);
				/* It is likely on the pagevec of a remote CPU */
				if (nr_pagevec)
					(*nr_pagevec)++;
			}
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: the number of the cache entries that were invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
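
/*
 * Illustrative sketch (not part of the original file): dropping all clean,
 * unmapped cache of an inode, in the style of POSIX_FADV_DONTNEED.  The
 * example_* name is hypothetical.
 */
static unsigned long __maybe_unused example_drop_clean_cache(struct inode *inode)
{
	/* start == 0, end == -1 covers the whole mapping */
	return invalidate_mapping_pages(inode->i_mapping, 0, -1);
}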

/**
 * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_pagevec: invalidate failed page number for caller
 *
 * This helper is similar to invalidate_mapping_pages(), except that it accounts
 * for pages that are likely on a pagevec and counts them in @nr_pagevec, which
 * will be used by the caller.
 */
void invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	__invalidate_mapping_pages(mapping, start, end, nr_pagevec);
}

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irq(&mapping->i_pages);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irq(&mapping->i_pages);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (find_get_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && page_mapped(page)) {
				/*
				 * If page is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
				did_range_unmap = 1;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);

			if (page_mapped(page))
				unmap_mapping_page(page);
			BUG_ON(page_mapped(page));

			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
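
/*
 * Illustrative sketch (not part of the original file): the classic direct-IO
 * pattern of writing back dirty cache and then insisting that every page be
 * invalidated.  The example_* name is hypothetical.
 */
static int __maybe_unused example_sync_and_invalidate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	int err;

	err = filemap_write_and_wait(mapping);
	if (err)
		return err;
	return invalidate_inode_pages2(mapping);	/* -EBUSY on failure */
}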

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
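
/*
 * Illustrative sketch (not part of the original file): the ATTR_SIZE branch
 * of a filesystem's ->setattr() is the typical truncate_setsize() caller,
 * with i_rwsem already held.  The example_* name is hypothetical.
 */
static void __maybe_unused example_setattr_size(struct inode *inode,
						struct iattr *attr)
{
	if (attr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, attr->ia_size);
}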

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from was already fully mapped? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
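
/*
 * Illustrative sketch (not part of the original file): an extending write
 * path updates i_size first, then lets pagecache_isize_extended() write-
 * protect the page straddling the old size.  The example_* name is
 * hypothetical.
 */
static void __maybe_unused example_extend_isize(struct inode *inode,
						loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);

	if (newsize > oldsize) {
		i_size_write(inode, newsize);
		pagecache_isize_extended(inode, oldsize, newsize);
	}
}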

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;

	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
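
/*
 * Illustrative sketch (not part of the original file): a hole-punch
 * implementation removes the cache over the punched range, last byte
 * inclusive.  The example_* name is hypothetical.
 */
static void __maybe_unused example_punch_hole(struct inode *inode,
					      loff_t offset, loff_t len)
{
	truncate_pagecache_range(inode, offset, offset + len - 1);
}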