/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the per-cpu pagevec machinery and the helpers that
 * move pages on and off the LRU lists: batched page freeing, deferred
 * LRU addition, activation, deactivation, lazy-freeing, and the pagevec
 * lookup functions used by filesystems.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for THP, not for
	 * hugetlb. This is because hugetlb pages never have PageLRU set
	 * (they are never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
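
/*
 * Example (illustrative sketch, not from the kernel tree): a caller
 * that collected pages on a local list threaded through page->lru can
 * drop all of its references in one call. "my_fill_list()" is a
 * hypothetical helper that adds referenced pages to the list.
 *
 *	LIST_HEAD(pages);
 *
 *	my_fill_list(&pages);
 *	... use the pages ...
 *	put_pages_list(&pages);
 *
 * After the call the list is empty and each page has lost the
 * reference the caller held.
 */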

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested: the loop stops at the first segment whose length is not
 * exactly PAGE_SIZE. If nr_segs is 0 or negative, returns 0.  Each
 * page returned must be released with a put_page() call when it is
 * finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
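
/*
 * Example (illustrative sketch, not from the kernel tree): pinning two
 * PAGE_SIZE-sized segments of a kernel buffer. Every kvec segment must
 * be exactly PAGE_SIZE long or the loop stops early. "buf" is assumed
 * to be a page-aligned kernel address.
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf,		.iov_len = PAGE_SIZE },
 *		{ .iov_base = buf + PAGE_SIZE,	.iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int i, pinned;
 *
 *	pinned = get_kernel_pages(kiov, 2, 0, pages);
 *	... use pages[0..pinned-1] ...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */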

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives a pointer to the page pinned.
 *		Must be at least one entry long.
 *
 * Returns 1 if the page is pinned. The page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
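
/*
 * Illustrative sketch of the expected caller (cf. end_page_writeback()):
 * writeback completion tests and clears PG_reclaim and, if it was set,
 * asks for the page to be requeued:
 *
 *	if (TestClearPageReclaim(page))
 *		rotate_reclaimable_page(page);
 */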

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
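
/*
 * Example (illustrative sketch, not from the kernel tree): a read-side
 * caller marks the page accessed once the data has actually been used,
 * e.g. after copying it out of the page cache:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... copy data out of the page ...
 *		mark_page_accessed(page);
 *		put_page(page);
 *	}
 */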

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}
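
/*
 * Illustrative sketch of the usual fault-path sequence (cf.
 * do_anonymous_page()): a freshly allocated, freshly mapped anonymous
 * page is charged, mapped and then placed on the LRU via this helper
 * so that mlocked VMAs are honoured (details abridged):
 *
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 */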

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. others -> none
 *
 * In 4, the page moves to the head of the inactive list because the
 * VM expects it to be written out by the flusher threads, which is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback, which can
		 * make readahead confusing.  But the race window is
		 * _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it sat in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages. They have
		 * the SwapBacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages, such as mprotect,
	 * unevictable page deactivation for accelerating reclaim is
	 * pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}
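
/*
 * Illustrative sketch of a caller (cf. invalidate_mapping_pages()):
 * when an invalidation attempt fails, e.g. because the page is mapped,
 * dirty or under writeback, the page is deactivated instead so that
 * reclaim will consider it sooner:
 *
 *	ret = invalidate_inode_page(page);
 *	unlock_page(page);
 *	if (!ret)
 *		deactivate_file_page(page);
 */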

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}
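
/*
 * Illustrative sketch of a caller (cf. madvise_free_pte_range()):
 * MADV_FREE cleans the pte and then marks the clean anonymous page
 * lazyfree (locking and pte details abridged):
 *
 *	ptent = pte_mkold(ptent);
 *	ptent = pte_mkclean(ptent);
 *	set_pte_at(mm, addr, pte, ptent);
 *	mark_page_lazyfree(page);
 */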

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually cause
 * a race condition.
 */
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif
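
/*
 * Example (illustrative sketch, not from the kernel tree): code that
 * must see every page on the LRU lists before isolating pages, such as
 * page migration or mlock, drains the per-cpu pagevecs first so that
 * isolate_lru_page() cannot miss pages still parked in them:
 *
 *	lru_add_drain_all();
 *	... isolate_lru_page() on the pages of interest ...
 */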

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * put_devmap_managed_page() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (put_devmap_managed_page(page))
				continue;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
							       flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
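
/*
 * Example (illustrative sketch, not from the kernel tree): dropping the
 * references taken by get_user_pages() in one batch instead of looping
 * over put_page(). (The exact get_user_pages_fast() arguments vary by
 * kernel version.)
 *
 *	npages = get_user_pages_fast(start, nr, 0, pages);
 *	... use the pages ...
 *	if (npages > 0)
 *		release_pages(pages, npages);
 */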

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	/*
	 * Page becomes evictable in two ways:
	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring LRU lock to put the page on the correct LRU
	 *    and then
	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
	 *   b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we
	 * need the following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 * if '#1' does not observe setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that the
	 * page_evictable check will put the page on the correct LRU.
	 * Without smp_mb(), SetPageLRU followed by the PageMlocked() check
	 * may put the page on the wrong LRU.
	 */
	smp_mb();

	if (page_evictable(page)) {
		lru = page_lru(page);
		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
					 PageActive(page));
		if (was_unevictable)
			count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			count_vm_event(UNEVICTABLE_PGCULLED);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
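
/*
 * Illustrative sketch of the usual pattern (cf.
 * truncate_inode_pages_range()): a lookup that may return shadow
 * entries handles those via indices[], prunes them, and then does
 * page-only work on what remains:
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_entries(&pvec, mapping, index,
 *				      PAGEVEC_SIZE, indices)) {
 *		... process shadow entries using indices[] ...
 *		pagevec_remove_exceptionals(&pvec);
 *		... operate on the remaining real pages ...
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */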

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for and return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec.  The function
 * takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If
 * this number is smaller than PAGEVEC_SIZE, the end of the specified range
 * has been reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
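
/*
 * Example (illustrative sketch, not from the kernel tree): walking all
 * cached pages in a range. @start is advanced by the lookup itself, so
 * the loop simply repeats until the range is exhausted:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = first;
 *	int i;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, last)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			... process pvec.pages[i] ...
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */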

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);
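
/*
 * Illustrative sketch of the classic writeback pattern (cf.
 * write_cache_pages()): walk the pages tagged dirty in a range,
 * writing each one out (locking and error handling abridged):
 *
 *	pagevec_init(&pvec);
 *	while (index <= end &&
 *	       pagevec_lookup_range_tag(&pvec, mapping, &index, end,
 *					PAGECACHE_TAG_DIRTY)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			... lock and write out pvec.pages[i] ...
 *		pagevec_release(&pvec);
 *	}
 */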

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag, unsigned max_pages)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;

	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}