/*
 * mm/truncate.c - code for taking down pages from address_spaces
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include <linux/cleancache.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrshadows--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track node without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes, &node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * range, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
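
/*
 * Illustrative sketch (not part of this file): a block-based filesystem
 * whose pages only carry buffer_heads can either leave ->invalidatepage
 * unset and rely on the block_invalidatepage fallback above (when
 * CONFIG_BLOCK is enabled), or wire it up explicitly in its
 * address_space_operations, e.g.:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * The example_* names are placeholders, not existing kernel symbols.
 */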

/*
 * This cancels just the dirty bit on the kernel page itself; it does
 * NOT remove dirty bits from any userspace page tables that map the
 * file, so callers still have to be careful with it.  The dirty page
 * accounting (zone and backing_dev stats, plus the task's cancelled
 * write accounting) is rolled back as well.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unmapped pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_CACHE_SHIFT,
				    PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * Note that since ->invalidatepage() accepts range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
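
/*
 * Worked example for the offset arithmetic above (illustrative only,
 * assuming PAGE_CACHE_SIZE == 4096): truncating bytes 1536..10239
 * inclusive gives
 *
 *	partial_start = 1536 & 4095         = 1536
 *	partial_end   = (10239 + 1) & 4095  = 2048
 *	start         = (1536 + 4095) >> 12 = 1
 *	end           = (10239 + 1) >> 12   = 2
 *
 * so only page index 1 is fully truncated, page 0 is zeroed from byte
 * 1536 to the end of the page, and page 2 is zeroed over bytes 0..2047.
 */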

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
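
/*
 * Illustrative sketch (not from this file): a filesystem's ->evict_inode
 * is expected to call truncate_inode_pages_final() once no new pages can
 * be added to the mapping, roughly:
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 *
 * example_evict_inode is a placeholder name; the call sequence is the
 * interesting part, with fs-private teardown happening around it.
 */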

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest; try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
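
/*
 * Illustrative sketch (not from this file): callers use this to drop
 * clean, unused cache over a byte range; an fadvise(DONTNEED)-style path
 * might do something like
 *
 *	start_index = offset >> PAGE_CACHE_SHIFT;
 *	end_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 *	invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * Dirty, locked, writeback or mapped pages are simply skipped, so this
 * is only best-effort.
 */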

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
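
/*
 * Illustrative sketch (not from this file): a direct-I/O style write path
 * typically flushes and then invalidates the affected range so readers do
 * not see stale cached data, roughly:
 *
 *	err = filemap_write_and_wait_range(mapping, pos, pos + len - 1);
 *	if (!err)
 *		err = invalidate_inode_pages2_range(mapping,
 *				pos >> PAGE_CACHE_SHIFT,
 *				(pos + len - 1) >> PAGE_CACHE_SHIFT);
 *
 * A -EBUSY return means some pages could not be invalidated (e.g. they
 * were redirtied) and the caller has to decide how to handle that.
 */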

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice: first simply for
	 * efficiency, so that truncate_inode_pages does less work, and
	 * second to unmap any pages which a racing fault instantiated
	 * and mapped after the first call.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
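
/*
 * Illustrative sketch (not from this file): a simple filesystem ->setattr
 * handling ATTR_SIZE would typically look roughly like
 *
 *	error = inode_change_ok(inode, attr);
 *	if (error)
 *		return error;
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size != i_size_read(inode))
 *		truncate_setsize(inode, attr->ia_size);
 *	setattr_copy(inode, attr);
 *	mark_inode_dirty(inode);
 *
 * with fs-specific block freeing done after truncate_setsize(), as the
 * kernel-doc above requires.
 */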

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * The rounding contracts the hole inwards, since unmap_mapping_range
	 * would otherwise expand it outwards; existing callers of
	 * truncate_pagecache_range do their own page rounding first anyway.
	 * Note that unmap_mapping_range allows holelen 0 for all, and we
	 * allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without the "even_cows"
	 * flag: hole-punching should not remove private COWed pages from
	 * the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
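
/*
 * Illustrative sketch (not from this file): a fallocate(FALLOC_FL_PUNCH_HOLE)
 * implementation would typically call this before freeing the underlying
 * blocks, roughly:
 *
 *	loff_t first = offset;
 *	loff_t last = offset + len - 1;
 *
 *	truncate_pagecache_range(inode, first, last);
 *	example_free_blocks(inode, first, last);
 *
 * example_free_blocks is a placeholder for the fs-specific deallocation;
 * ordering it after the pagecache truncation keeps the cache coherent
 * with the on-disk layout, as the kernel-doc above describes.
 */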