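// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 */
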
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
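
/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */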
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}
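
/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */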
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	if (!dax)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (!dax)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}
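
/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */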
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
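
/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */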
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
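
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */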
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
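
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 */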
static void truncate_cleanup_page(struct page *page)
{
	if (page_mapped(page))
		unmap_mapping_page(page);

	if (page_has_private(page))
		do_invalidatepage(page, 0, thp_size(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}
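
/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unmapped pages, which are then reused for other things.
 *
 * Returns non-zero if the page was successfully invalidated.
 */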
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(page);
	delete_from_page_cache(page);
	return 0;
}
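
/*
 * Used to get rid of pages on hardware memory corruption.
 */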
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
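
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */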
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
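
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */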
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping_empty(mapping))
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		index = indices[pagevec_count(&pvec) - 1] + 1;
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		for (i = 0; i < pagevec_count(&pvec); i++)
			truncate_cleanup_page(pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &pvec);
		for (i = 0; i < pagevec_count(&pvec); i++)
			unlock_page(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}

	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
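
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * truncate_inode_pages() will not truncate the last page at all!!
 */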
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
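
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */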
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
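
/*
 * Common helper for invalidate_mapping_pages() and
 * invalidate_mapping_pagevec(): invalidate clean, unmapped and unlocked
 * pages (and shadow entries) in the range [start, end].  When @nr_pagevec
 * is non-NULL, pages that could not be invalidated because they appear to
 * be held on a pagevec are counted in *nr_pagevec.
 */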
static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (find_lock_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_page(page);
				/* It is likely on the pagevec of a remote CPU */
				if (nr_pagevec)
					(*nr_pagevec)++;
			}
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
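
/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: the number of the cache entries that were invalidated
 */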
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
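
/**
 * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_pagevec: count of pages that could not be invalidated, likely because
 *		they sit on a pagevec, reported back to the caller
 *
 * This helper is similar to invalidate_mapping_pages(), except that it
 * accounts for pages that are likely on a pagevec and counts them in
 * @nr_pagevec, which will be used by the caller.
 */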
void invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	__invalidate_mapping_pages(mapping, start, end, nr_pagevec);
}
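
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */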
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
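
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */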
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (find_get_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && page_mapped(page)) {
				/*
				 * If page is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
				did_range_unmap = 1;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);

			if (page_mapped(page))
				unmap_mapping_page(page);
			BUG_ON(page_mapped(page));

			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.
	 * We could invalidate page tables while invalidating each entry
	 * however that would be expensive.  And doing range unmapping
	 * before doesn't work as we have no cheap way to find whether
	 * page cache entry didn't evict data before the truncation point
	 * from page tables at hand.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
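
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */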
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
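
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */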
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
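
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */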
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
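
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */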
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;

	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
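
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */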
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);