/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Werner Fink
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to keep valid_queued() true for all zones.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for THP, not for
	 * hugetlb.  A hugetlb page never has PageLRU set (it is never put on
	 * any LRU list) and no memcg accounting is done on it.
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/*
 * Slow path of put_page() for a tail page whose compound head is not
 * tail-refcounted (__compound_tail_refcounted() is false for it), i.e. a
 * hugetlbfs page or a slab-allocated compound page.  Tail pins on such pages
 * are taken directly on the head page's _count, so no _mapcount accounting
 * and no compound_lock serialisation against __split_huge_page_refcount()
 * is needed here.  The only race to care about is the one where @page used
 * to be a THP tail that got split and whose head was then reallocated as a
 * hugetlbfs or slab compound page; that is what the PageTail() re-check
 * below is for.
 */
static __always_inline
void put_unrefcounted_compound_page(struct page *page_head, struct page *page)
{
	/*
	 * If @page is a THP tail, the tail page flags must be read after
	 * the head page flags.  The smp_rmb() pairs with the write barrier
	 * in __split_huge_page_refcount(), so that if PageTail is still
	 * seen set below, @page_head is still the compound head of @page.
	 */
	smp_rmb();
	if (likely(PageTail(page))) {
		/*
		 * @page is still a tail of a hugetlbfs or slab compound
		 * page, so __split_huge_page_refcount() cannot race here.
		 */
		VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
		if (put_page_testzero(page_head)) {
			/*
			 * If this was the last reference, the head cannot
			 * be a slab compound page: a tail pin can never be
			 * the last reference on a slab compound page,
			 * because all tail pins must be released while the
			 * head is still pinned by the slab code.  So this
			 * must be a hugetlbfs page, freed through its
			 * compound destructor.
			 */
			VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
			__put_compound_page(page_head);
		}
	} else
		/*
		 * __split_huge_page_refcount() ran before us: @page was a
		 * THP tail.  The split head has been freed and possibly
		 * reallocated, and @page is now an order-0 page holding
		 * its own reference, so drop that instead.
		 */
		if (put_page_testzero(page))
			__put_single_page(page);
}

static __always_inline
void put_refcounted_compound_page(struct page *page_head, struct page *page)
{
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		unsigned long flags;

		/*
		 * @page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock.  That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		if (unlikely(!PageTail(page))) {
			/* __split_huge_page_refcount ran before us */
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				/*
				 * The @page_head may have been freed
				 * and reallocated as a compound page
				 * of smaller order and then freed
				 * again.  All we know is that it
				 * cannot have become: a THP page, a
				 * compound page of higher order, a
				 * tail page.  That is because we
				 * still hold the refcount of the
				 * split THP tail and @page_head was
				 * the THP head before the split.
				 */
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
out_put_single:
			if (put_page_testzero(page))
				__put_single_page(page);
			return;
		}
		VM_BUG_ON_PAGE(page_head != page->first_page, page);
		/*
		 * We can release the refcount taken by
		 * get_page_unless_zero() now that
		 * __split_huge_page_refcount() is blocked on the
		 * compound_lock.
		 */
		if (put_page_testzero(page_head))
			VM_BUG_ON_PAGE(1, page_head);
		/* __split_huge_page_refcount will wait now */
		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
		atomic_dec(&page->_mapcount);
		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
		compound_unlock_irqrestore(page_head, flags);

		if (put_page_testzero(page_head)) {
			if (PageHead(page_head))
				__put_compound_page(page_head);
			else
				__put_single_page(page_head);
		}
	} else {
		/* @page_head is a dangling pointer */
		VM_BUG_ON_PAGE(PageTail(page), page);
		goto out_put_single;
	}
}

static void put_compound_page(struct page *page)
{
	struct page *page_head;

	/*
	 * We see PageCompound set and PageTail not set, so @page may be:
	 * 1. a hugetlbfs head page, or
	 * 2. a THP head page.
	 */
	if (likely(!PageTail(page))) {
		if (put_page_testzero(page)) {
			/*
			 * By the time all refcounts have been released
			 * split_huge_page cannot run anymore from under us.
			 */
			if (PageHead(page))
				__put_compound_page(page);
			else
				__put_single_page(page);
		}
		return;
	}

	/*
	 * We see PageCompound set and PageTail set, so @page may be:
	 * 1. a tail hugetlbfs page, or
	 * 2. a tail THP page, or
	 * 3. a split THP page.
	 *
	 * Case 3 is possible, as we may race with
	 * __split_huge_page_refcount tearing down a THP page.
	 */
	page_head = compound_head_by_tail(page);
	if (!__compound_tail_refcounted(page_head))
		put_unrefcounted_compound_page(page_head, page);
	else
		put_refcounted_compound_page(page_head, page);
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page().  It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got;
	struct page *page_head = compound_head(page);

	/* See the comment above put_unrefcounted_compound_page(). */
	if (!__compound_tail_refcounted(page_head)) {
		smp_rmb();
		if (likely(PageTail(page))) {
			/*
			 * This is a hugetlbfs page or a slab page.
			 * __split_huge_page_refcount() cannot race
			 * here.
			 */
			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
			__get_page_tail_foll(page, true);
			return true;
		} else {
			/*
			 * __split_huge_page_refcount() ran before us:
			 * @page was a THP tail.  The split @page_head
			 * has been freed and reallocated as a slab or
			 * hugetlbfs page of smaller order (only
			 * possible if reallocated as slab on x86).
			 */
			return false;
		}
	}

	got = false;
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * @page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock.  That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount() won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
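
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * builds a private list of freshly allocated pages and then hands the whole
 * list back with put_pages_list().  The helper name "example_drop_page_list"
 * and the allocation loop are hypothetical.
 */
static inline void example_drop_page_list(int nr)
{
	LIST_HEAD(pages);
	int i;

	for (i = 0; i < nr; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		/* each page starts with one reference; thread it on the list */
		list_add(&page->lru, &pages);
	}

	/* unthreads every page and drops that reference */
	put_pages_list(&pages);
}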

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns the number of pages pinned.  This may be fewer than @nr_segs if a
 * segment's iov_len is not exactly PAGE_SIZE.  Each page returned must be
 * released with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives a pointer to the pinned page.
 *		Must hold at least one entry.
 *
 * Returns 1 and stores the pinned page in @pages[0].  The page must be
 * released with a put_page() call when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
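
/*
 * Illustrative usage sketch (not part of the original file): pinning a single
 * page of a kernel-mapped buffer with get_kernel_page() so it can be handed
 * to code that works on struct page.  The helper name
 * "example_pin_kernel_buffer" and the surrounding error handling are
 * hypothetical.
 */
static inline int example_pin_kernel_buffer(void *buf)
{
	struct page *page;
	int pinned;

	/* @buf must be a kernel-mapped address covering one full page */
	pinned = get_kernel_page((unsigned long)buf, 0, &page);
	if (pinned != 1)
		return -EFAULT;

	/* ... hand @page to code that expects a pinned struct page ... */

	put_page(page);		/* drop the reference taken above */
	return 0;
}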

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to
 * the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec.  Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained.  Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs.  Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
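
/*
 * Illustrative usage sketch (not part of the original file): the typical
 * read-side pattern of looking a page up in the page cache and recording the
 * access so the LRU ordering reflects it.  The helper name
 * "example_touch_cached_page" is hypothetical.
 */
static inline void example_touch_cached_page(struct address_space *mapping,
					     pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return;

	/* referenced -> active promotion is handled by mark_page_accessed() */
	mark_page_accessed(page);
	page_cache_release(page);
}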

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add an anonymous page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec.  The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained.  This gives the caller of lru_cache_add() a chance to
 * have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable - add a page to the active or unevictable list
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped, for determining evictability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state() because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty/under writeback, it can be
 * reclaimed asap using the PG_reclaim flag.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback(),
		 * which can confuse readahead, but the race window is
		 * really small and the problem is non-critical.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page is clean and not under writeback: move it to
		 * the tail of the inactive list so reclaim finds it first.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In workloads with many unevictable pages, deactivating them to
	 * accelerate reclaim is pointless, so bail out early for
	 * unevictable pages.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}

/**
 * release_pages - batched page_cache_release()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same zone.  The lock is held only if zone != NULL.
		 */
		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = NULL;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
								flags);
				lock_batch = 0;
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.
 * __pagevec_lru_add() calls release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * The head page has not yet been counted as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_pages:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
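
/*
 * Illustrative usage sketch (not part of the original file): the common
 * pattern for walking every page currently in a mapping with
 * pagevec_lookup().  The helper name "example_walk_mapping" and the
 * per-page callback are hypothetical.
 */
static inline void example_walk_mapping(struct address_space *mapping,
					void (*process_page)(struct page *))
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* remember where the next lookup should continue */
			index = page->index + 1;
			process_page(page);
		}
		/* drop the references pagevec_lookup() took on the pages */
		pagevec_release(&pvec);
		cond_resched();
	}
}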

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++)
		spin_lock_init(&swapper_spaces[i].tree_lock);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}