/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
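
/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */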
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = page->first_page;
		smp_rmb();
		/*
		 * If PageTail is still set after the smp_rmb(), the
		 * page->first_page read above cannot be a dangling
		 * pointer; this pairs with the smp_wmb() in
		 * __split_huge_page_refcount().
		 */
		if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head may have been turned back into a
			 * regular (non compound) page before we got
			 * the reference on it above.
			 */
			if (unlikely(!PageHead(page_head))) {
				/* PageHead is cleared after PageTail */
				smp_rmb();
				VM_BUG_ON(PageTail(page));
				goto out_put_head;
			}
			/*
			 * Only run compound_lock on a valid PageHead,
			 * after having pinned it with
			 * get_page_unless_zero() above.
			 */
			smp_mb();
			/* page_head wasn't a dangling pointer */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount ran before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
			out_put_head:
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(atomic_read(&page->_count) <= 0);
			atomic_dec(&page->_count);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
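
/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */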
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
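
/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */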
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
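
/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to
 * the tail of the inactive list so that reclaim finds it first.
 */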
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}
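
/*
 * Move an inactive, evictable LRU page to the active list of its zone.
 */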
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}
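
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */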
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);
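
/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */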
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
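
/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add the page directly to its zone's unevictable list, bypassing the
 * per-cpu pagevecs, with zone->lru_lock held.
 */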
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}
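
/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */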
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}
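
/*
 * Drain the per-cpu pagevecs on every online CPU.  Returns 0 for success.
 */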
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
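
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */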
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
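
/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.
 * ____pagevec_lru_add() calls release_pages() directly to avoid
 * mutual recursion.
 */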
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);
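
/* used by __split_huge_page_refcount() */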
void lru_add_page_tail(struct zone* zone,
		       struct page *page, struct page *page_tail)
{
	int active;
	enum lru_list lru;
	const int file = 0;
	struct list_head *head;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
		update_page_reclaim_stat(zone, page_tail, file, active);
		if (likely(PageLRU(page)))
			head = page->lru.prev;
		else
			head = &zone->lru[lru].list;
		__add_page_to_lru_list(zone, page_tail, lru, head);
	} else {
		SetPageUnevictable(page_tail);
		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
	}
}
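
/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */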
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(____pagevec_lru_add);
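
/*
 * Try to drop buffers from the pages in a pagevec
 */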
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}
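
/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */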
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);
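
/*
 * Perform any setup for the swap system
 */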
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system means that we
	 * _really_ don't want to cluster much more
	 */
}