/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around.  It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated
 * and is not actually mapped anywhere at all.  However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping: this happens when the page has already been (or is being) removed
 * from this mapping by somebody else, and there is nothing left for us to do.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused cache pages if its refcount check succeeds.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_CACHE_SHIFT,
				    PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected
 * region.  The first pass will remove most pages, so the search cost of the
 * second pass is low.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated.  Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index >= end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index >= end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index >= end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
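
/*
 * Illustrative usage (a hedged sketch, not code from this file): a simple
 * filesystem with no private page metadata typically drops the whole
 * pagecache in its ->evict_inode() before releasing on-disk resources:
 *
 *	static void examplefs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		clear_inode(inode);
 *	}
 *
 * "examplefs_evict_inode" is a hypothetical name used only for illustration.
 */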

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
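
/*
 * Illustrative usage (hedged sketch): a caller that merely wants to hint
 * that a byte range is no longer needed, in the style of
 * posix_fadvise(POSIX_FADV_DONTNEED), converts the byte range to page
 * indexes and lets invalidate_mapping_pages() skip anything it cannot
 * safely drop:
 *
 *	pgoff_t first = offset >> PAGE_CACHE_SHIFT;
 *	pgoff_t last = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 *
 *	if (first <= last)
 *		invalidate_mapping_pages(mapping, first, last);
 *
 * Dirty, locked, writeback and mapped pages simply stay in the cache.
 */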

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
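
/*
 * Illustrative usage (hedged sketch): callers that need a *guaranteed*
 * invalidation, such as a network filesystem that has detected a
 * server-side change, use this rather than invalidate_mapping_pages():
 *
 *	if (examplefs_cache_is_stale(inode))
 *		error = invalidate_inode_pages2(inode->i_mapping);
 *
 * A non-zero return (-EBUSY) means some pages could not be invalidated,
 * for example because they were redirtied while we were working.
 * "examplefs_cache_is_stale" is a hypothetical helper, not a real API.
 */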

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem-specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
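
/*
 * Illustrative usage (hedged sketch): a filesystem's ->setattr() handling
 * ATTR_SIZE would typically do something like:
 *
 *	error = inode_newsize_ok(inode, attr->ia_size);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, attr->ia_size);
 *	examplefs_truncate_blocks(inode, attr->ia_size);
 *
 * where "examplefs_truncate_blocks" stands in for whatever frees the
 * on-disk blocks beyond the new size; it is not a real helper.
 */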

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 *
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
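
/*
 * Illustrative usage (hedged sketch): a filesystem implementing
 * fallocate(FALLOC_FL_PUNCH_HOLE) would typically drop the cached pages
 * over the hole before freeing the underlying blocks, for example:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	examplefs_punch_blocks(inode, offset, len);
 *
 * "examplefs_punch_blocks" is a hypothetical helper standing in for the
 * filesystem-specific block deallocation.
 */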