/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.8.92 by Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/*
 * Tail pages of a compound page are not individually refcounted when
 * the head is a hugetlbfs or slab page: such compound pages can never
 * be split, so pinning a tail needs neither the _mapcount tail
 * refcounting nor the compound lock, only the head page's _count.
 */
static __always_inline
void put_unrefcounted_compound_page(struct page *page_head, struct page *page)
{
	/*
	 * Re-check PageTail() after the read barrier: a hugetlbfs or
	 * slab head cannot be split from under us, but if @page was a
	 * THP tail whose compound page has meanwhile been split,
	 * PageTail is now clear, @page_head is stale, and @page must
	 * be treated as a standalone page.
	 */
	smp_rmb();
	if (likely(PageTail(page))) {
		/*
		 * __split_huge_page_refcount cannot race here,
		 * see the comment above this function.
		 */
		VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
		VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
		if (put_page_testzero(page_head)) {
			/*
			 * If this is the tail of a slab compound page,
			 * the tail pin must not be the last reference
			 * held on the page, because PG_slab cannot be
			 * cleared until all tail pins (which skip the
			 * _mapcount tail refcounting) have been
			 * released.
			 *
			 * If this is the tail of a hugetlbfs page, the
			 * tail pin may instead be the last reference on
			 * the page, because PageHeadHuge will not go
			 * away until the compound page enters the buddy
			 * allocator.
			 */
			VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
			__put_compound_page(page_head);
		}
	} else
		/*
		 * __split_huge_page_refcount ran before us: "page" was
		 * a THP tail.  The split page_head has been freed and
		 * reallocated as a slab or hugetlbfs page of smaller
		 * order (only possible if reallocated as slab on x86).
		 */
		if (put_page_testzero(page))
			__put_single_page(page);
}

static __always_inline
void put_refcounted_compound_page(struct page *page_head, struct page *page)
{
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		unsigned long flags;

		/*
		 * @page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock. That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		if (unlikely(!PageTail(page))) {
			/* __split_huge_page_refcount run before us */
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				/*
				 * The @page_head may have been freed
				 * and reallocated as a compound page
				 * of smaller order and then freed
				 * again.  All we know is that it
				 * cannot have become: a THP page, a
				 * compound page of higher order, a
				 * tail page.  That is because we
				 * still hold the refcount of the
				 * split THP tail and page_head was
				 * the THP head before the split.
				 */
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
out_put_single:
			if (put_page_testzero(page))
				__put_single_page(page);
			return;
		}
		VM_BUG_ON_PAGE(page_head != page->first_page, page);
		/*
		 * We can release the refcount taken by
		 * get_page_unless_zero() now that
		 * __split_huge_page_refcount() is blocked on the
		 * compound_lock.
		 */
		if (put_page_testzero(page_head))
			VM_BUG_ON_PAGE(1, page_head);
		/* __split_huge_page_refcount will wait now */
		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
		atomic_dec(&page->_mapcount);
		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
		compound_unlock_irqrestore(page_head, flags);

		if (put_page_testzero(page_head)) {
			if (PageHead(page_head))
				__put_compound_page(page_head);
			else
				__put_single_page(page_head);
		}
	} else {
		/* @page_head is a dangling pointer */
		VM_BUG_ON_PAGE(PageTail(page), page);
		goto out_put_single;
	}
}

static void put_compound_page(struct page *page)
{
	struct page *page_head;

	/*
	 * We see the PageCompound set and PageTail not set, so @page maybe:
	 * 1. hugetlbfs head page, or
	 * 2. THP head page.
	 */
	if (likely(!PageTail(page))) {
		if (put_page_testzero(page)) {
			/*
			 * By the time all refcounts have been released
			 * split_huge_page cannot run anymore from under us.
			 */
			if (PageHead(page))
				__put_compound_page(page);
			else
				__put_single_page(page);
		}
		return;
	}

	/*
	 * We see the PageCompound set and PageTail set, so @page maybe:
	 * 1. a tail hugetlbfs page, or
	 * 2. a tail THP page, or
	 * 3. a split THP page.
	 *
	 * Case 3 is possible, as we may race with
	 * __split_huge_page_refcount tearing down a THP page.
	 */
	page_head = compound_head_by_tail(page);
	if (!__compound_tail_refcounted(page_head))
		put_unrefcounted_compound_page(page_head, page);
	else
		put_refcounted_compound_page(page_head, page);
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
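
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs a reference with put_page().  Assuming a page found via e.g.
 * find_get_page() (which takes a reference for us):
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the page ...
 *		put_page(page);		// drops the ref; compound-aware
 *	}
 */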

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got;
	struct page *page_head = compound_head(page);

	/* Ref to put_compound_page() comment. */
	if (!__compound_tail_refcounted(page_head)) {
		smp_rmb();
		if (likely(PageTail(page))) {
			/*
			 * This is a hugetlbfs page or a slab
			 * page. __split_huge_page_refcount
			 * cannot race here.
			 */
			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
			__get_page_tail_foll(page, true);
			return true;
		} else {
			/*
			 * __split_huge_page_refcount ran before
			 * us: "page" was a THP tail.  The split
			 * page_head has been freed and
			 * reallocated as a slab or hugetlbfs
			 * page of smaller order (only possible
			 * if reallocated as slab on x86).
			 */
			return false;
		}
	}

	got = false;
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * @page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
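
/*
 * Illustrative sketch, not part of the original file: a caller that
 * collected pages on a private list threaded through page->lru can
 * drop all of its references in one call:
 *
 *	LIST_HEAD(pages);
 *	... list_add(&page->lru, &pages) for each referenced page ...
 *	put_pages_list(&pages);		// the list is empty afterwards
 */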

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  Each page
 * returned must be released with a put_page() call when it is
 * finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must be at least nr_segs long.
 *
 * Returns 1 if the page is pinned. The page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
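
/*
 * Illustrative sketch, not part of the original file: pinning the page
 * backing a page-sized, page-aligned kernel buffer "buf" (hypothetical
 * variable) before handing it to code that wants struct page references:
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		... use page ...
 *		put_page(page);
 *	}
 */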

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
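
/*
 * Illustrative sketch, not part of the original file: a move_fn
 * callback only manipulates LRU state; pagevec_lru_move_fn() has
 * already taken the correct zone->lru_lock and looked up the lruvec.
 * A hypothetical callback moving pages to the tail of their list:
 *
 *	static void move_to_tail_fn(struct page *page,
 *				    struct lruvec *lruvec, void *arg)
 *	{
 *		if (PageLRU(page))
 *			list_move_tail(&page->lru,
 *				       &lruvec->lists[page_lru(page)]);
 *	}
 *
 * pagevec_move_tail_fn() below is the real in-tree equivalent.
 */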

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  Move it to the tail of the inactive list so that it
 * gets reclaimed ASAP.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
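
/*
 * Illustrative sketch, not part of the original file: two touches
 * promote a page from the inactive to the active list, so a read path
 * that hits the same page cache page twice typically does:
 *
 *	page = find_get_page(mapping, index);
 *	mark_page_accessed(page);	// 1st touch: sets PG_referenced
 *	...
 *	mark_page_accessed(page);	// 2nd touch: activates the page
 */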

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}
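
/*
 * Illustrative sketch, not part of the original file: a typical
 * sequence for a freshly allocated page that is not yet visible to
 * anyone else:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	__SetPageReferenced(page);	// non-atomic is safe here
 *	... insert the page into the page cache or map it ...
 *	lru_cache_add(page);		// queue for the LRU via pagevec
 */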

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list, it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				      hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. none -> none
 *
 * In 4, the page moves to the head of the inactive list because the
 * VM expects it to be written out by flusher threads, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback()
		 * and confuse readahead, but the race window is really
		 * small and it is a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec;
		 * move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (e.g. mlocked ranges),
	 * deactivating them to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}
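
/*
 * Illustrative note, not part of the original file: code that must
 * observe pages on the LRU lists - an mlock() walk or an invalidation
 * pass, say - drains the per-cpu pagevecs first:
 *
 *	lru_add_drain_all();	// may sleep; schedules work on each cpu
 *	... scan the LRU, or test page state that depends on it ...
 */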

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same zone. The lock is held only if zone != NULL.
		 */
		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = NULL;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
							       flags);
				lock_batch = 0;
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
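
/*
 * Illustrative sketch, not part of the original file: the usual
 * pagevec pattern is fill-then-release in batches; next_page() is a
 * hypothetical source of referenced pages:
 *
 *	struct pagevec pvec;
 *	struct page *page;
 *
 *	pagevec_init(&pvec, 0);
 *	while ((page = next_page()) != NULL)
 *		if (!pagevec_add(&pvec, page))
 *			__pagevec_release(&pvec);
 *	pagevec_release(&pvec);		// drain the remainder
 */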

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_pages:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping.  All entries
 * are placed in @pvec.  pagevec_lookup_entries() takes a reference
 * against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
			pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
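
/*
 * Illustrative sketch, not part of the original file: walking every
 * page of a mapping in batches of PAGEVEC_SIZE:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *			index = page->index + 1;
 *			... process page ...
 *		}
 *		pagevec_release(&pvec);
 *	}
 */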

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
			    pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
				      nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);
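
/*
 * Illustrative sketch, not part of the original file: the classic
 * writeback pattern iterates dirty-tagged pages; note that @index is
 * advanced by find_get_pages_tag() itself:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		... write out pvec.pages[0 .. pagevec_count(&pvec)) ...
 *		pagevec_release(&pvec);
 *	}
 */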

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	if (bdi_init(swapper_spaces[0].backing_dev_info))
		panic("Failed to init swap bdi");
	for (i = 0; i < MAX_SWAPFILES; i++) {
		spin_lock_init(&swapper_spaces[i].tree_lock);
		INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
	}
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}