/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <trace/events/kmem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

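/*
 * Array of node states.
 */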
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

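/*
 * Per-zone ratios for the lowmem_reserve sysctl: each lower zone keeps
 * zone_size/ratio pages reserved against allocations that could have
 * been satisfied from a higher zone, so lowmem is not exhausted by
 * allocations that did not need it.
 */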
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges
   * of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible, so
   * the number of times add_active_range() can be called is related to
   * the number of nodes and the number of holes.
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

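/*
 * Temporary debugging check for pages not lying within a given zone.
 */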
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		__ClearPageBuddy(page);
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	printk(KERN_ALERT
		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
		page, (void *)page->flags, page_count(page),
		page_mapcount(page), page->mapping, page->index);

	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}

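/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 * the first PAGE_SIZE page is called the "head page", the remaining pages are
 * "tail pages".  The head page carries the compound order and a destructor
 * (free_compound_page() below), and each tail page points back at the head
 * via ->first_page.
 */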
static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it is a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

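/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */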
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

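/*
 * This function checks whether a page is free && is the buddy
 * we can do coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * A page in the buddy system has PG_buddy set, and its order is
 * recorded in page_private(page).
 */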
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

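/*
 * Freeing function for a buddy system allocator.
 *
 * Pages are kept on per-order, per-migratetype free lists.  The page being
 * freed is merged with its buddy while that buddy is also free and of the
 * same order, moving up one order per merge, and the resulting block is
 * placed on the free list of the final order.  A later allocation of a
 * smaller block splits it back down via expand().
 */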
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
 * Clean up the accounting for a freed page that was still mlocked.
 * The page should not be on the LRU, so there is nothing else to fix up.
 */
static inline void free_page_mlock(struct page *page)
{
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
#else
static void free_page_mlock(struct page *page) { }
#endif

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

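/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of order 0.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state, and clear the zone's pages_scanned
 * counter to hold off the "all pages are pinned" detection logic.
 */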
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;

	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	while (count) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		do {
			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			__free_one_page(page, zone, 0, migratetype);
			trace_mm_page_pcpu_drain(page, 0, migratetype);
		} while (--count && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	__free_one_page(page, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;
	int wasMlocked = __TestClearPageMlocked(page);

	kmemcheck_free_shadow(page, order);

	for (i = 0; i < (1 << order); ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					 PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}

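/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 */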
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}

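/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */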
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area *area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE ||
					page_group_by_mobility_disabled) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			/* Take ownership for orders >= pageblock_order */
			if (current_order >= pageblock_order)
				change_pageblock_range(page, current_order,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);

			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

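/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */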
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number in some conditions. This is useful for IO devices
		 * that can merge IO requests if the physical pages are
		 * ordered properly.
		 */
		if (likely(cold == 0))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pcppages_bulk(zone, to_drain, pcp);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

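/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */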
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = zone_pcp(zone, cpu);

		pcp = &pset->pcp;
		local_irq_save(flags);
		free_pcppages_bulk(zone, pcp->count, pcp);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;
	int wasMlocked = __TestClearPageMlocked(page);

	kmemcheck_free_shadow(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
	}
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags);
	put_cpu();
}

void free_hot_page(struct page *page)
{
	trace_mm_page_free_direct(page, 0);
	free_hot_cold_page(page, 0);
}

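/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */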
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		pcp = &zone_pcp(zone, cpu)->pcp;
		list = &pcp->lists[migratetype];
		local_irq_save(flags);
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file ||
			!fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

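/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */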
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

#ifdef CONFIG_NUMA
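/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in the last second) found to be nearly full.  Reduces
 * the cache footprint of zonelist scans that have to skip over a lot
 * of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed-in zonelist, returns
 * a pointer to the allowed node mask (either the current task's
 * mems_allowed, or node_states[N_HIGH_MEMORY]); otherwise returns NULL.
 * A stale fullzones bitmap (older than a second) is zapped first.
 */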
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones BITMAP).
 *  2) Check that the zone's node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if the zone is worth looking at further, or
 * false (zero) if it is not.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given a full memory zone 'zone', and a zonelist entry 'z' pointing at it,
 * mark the zone full so further allocation attempts skip it until the
 * fullzones bitmap is next zapped (at most a second later).
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif	/* CONFIG_NUMA */

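/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */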
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			int ret;

			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
			if (zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				goto try_this_zone;

			if (zone_reclaim_mode == 0)
				goto this_zone_full;

			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				goto try_next_zone;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				goto this_zone_full;
			default:
				/* did we reclaim enough */
				if (!zone_watermark_ok(zone, order, mark,
						classzone_idx, alloc_flags))
					goto this_zone_full;
			}
		}

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
			/*
			 * we do zlc_setup after the first zone is tried but
			 * only if there are multiple nodes to make it
			 * worthwhile
			 */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	}

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}

static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	/*
	 * These allocations are not allowed to fail, so keep retrying.
	 */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	return 0;
}

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	/* Acquire the OOM killer lock for the zones in zonelist */
	if (!try_set_zone_oom(zonelist, gfp_mask)) {
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure.
	 */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
		order, zonelist, high_zoneidx,
		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
		preferred_zone, migratetype);
	if (page)
		goto out;

	/* The OOM killer will not help higher order allocs */
	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
		goto out;

	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order);

out:
	clear_zonelist_oom(zonelist, gfp_mask);
	return page;
}

/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress)
{
	struct page *page = NULL;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (order != 0)
		drain_all_pages();

	if (likely(*did_some_progress))
		page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx,
					alloc_flags, preferred_zone,
					migratetype);
	return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
			congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}

static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
						enum zone_type high_zoneidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order);
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	struct task_struct *p = current;
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (gfp_mask & __GFP_HIGH);

	if (!wait) {
		alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(p)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (!in_interrupt() &&
		    ((p->flags & PF_MEMALLOC) ||
		     unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}

	return alloc_flags;
}

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct page *page = NULL;
	int alloc_flags;
	unsigned long pages_reclaimed = 0;
	unsigned long did_some_progress;
	struct task_struct *p = current;

	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
	if (order >= MAX_ORDER) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;

restart:
	wake_all_kswapd(order, zonelist, high_zoneidx);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/* This is the last chance, in general, before the goto nopage. */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);
	if (page)
		goto got_pg;

rebalance:
	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		page = __alloc_pages_high_priority(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
		if (page)
			goto got_pg;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (p->flags & PF_MEMALLOC)
		goto nopage;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress);
	if (page)
		goto got_pg;

	/*
	 * If we failed to make any progress reclaiming, then we are
	 * running out of options and have to consider going OOM
	 */
	if (!did_some_progress) {
		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
			if (oom_killer_disabled)
				goto nopage;
			page = __alloc_pages_may_oom(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask, preferred_zone,
					migratetype);
			if (page)
				goto got_pg;

			/*
			 * The OOM killer does not trigger for high-order
			 * ~__GFP_NOFAIL allocations so if no progress is being
			 * made, there are no other options and retrying is
			 * unlikely to help.
			 */
			if (order > PAGE_ALLOC_COSTLY_ORDER &&
						!(gfp_mask & __GFP_NOFAIL))
				goto nopage;

			goto restart;
		}
	}

	/* Check if we should retry the allocation */
	pages_reclaimed += did_some_progress;
	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
		/* Wait for some write requests to complete then retry */
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
	return page;
got_pg:
	if (kmemcheck_enabled)
		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
	return page;
}

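/*
 * This is the 'heart' of the zoned buddy allocator.
 */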
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zone *preferred_zone;
	struct page *page;
	int migratetype = allocflags_to_migratetype(gfp_mask);

	gfp_mask &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_WAIT);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

	/*
	 * Check the zones suitable for the gfp_mask contain at least one
	 * valid zone. It's possible to have an empty zonelist as a result
	 * of GFP_THISNODE and a memoryless node
	 */
	if (unlikely(!zonelist->_zonerefs->zone))
		return NULL;

	/* The preferred zone is used for statistics later */
	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
	if (!preferred_zone)
		return NULL;

	/* First allocation attempt */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
			preferred_zone, migratetype);
	if (unlikely(!page))
		page = __alloc_pages_slowpath(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);

	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0) {
		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
		free_hot_cold_page(pvec->pages[i], pvec->cold);
	}
}

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		trace_mm_page_free_direct(page, order);
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

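/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */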
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}

	return (void *)addr;
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);

static unsigned int nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned int sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->present_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = global_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

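/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We also print the per-order free counts of each zone, and the total.
 */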
void show_free_areas(void)
{
	int cpu;
	struct zone *zone;

	for_each_populated_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu"
		" dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
		global_page_state(NR_ACTIVE_ANON),
		global_page_state(NR_INACTIVE_ANON),
		global_page_state(NR_ISOLATED_ANON),
		global_page_state(NR_ACTIVE_FILE),
		global_page_state(NR_INACTIVE_FILE),
		global_page_state(NR_ISOLATED_FILE),
		global_page_state(NR_UNEVICTABLE),
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE),
		global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_populated_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" present:%lukB"
			" mlocked:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" mapped:%lukB"
			" shmem:%lukB"
			" slab_reclaimable:%lukB"
			" slab_unreclaimable:%lukB"
			" kernel_stack:%lukB"
			" pagetables:%lukB"
			" unstable:%lukB"
			" bounce:%lukB"
			" writeback_tmp:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone_page_state(zone, NR_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ISOLATED_ANON)),
			K(zone_page_state(zone, NR_ISOLATED_FILE)),
			K(zone->present_pages),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_FILE_DIRTY)),
			K(zone_page_state(zone, NR_WRITEBACK)),
			K(zone_page_state(zone, NR_FILE_MAPPED)),
			K(zone_page_state(zone, NR_SHMEM)),
			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
			zone_page_state(zone, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
			K(zone_page_state(zone, NR_PAGETABLE)),
			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
			zone->pages_scanned,
			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_populated_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

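/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */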
2307static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2308 int nr_zones, enum zone_type zone_type)
2309{
2310 struct zone *zone;
2311
2312 BUG_ON(zone_type >= MAX_NR_ZONES);
2313 zone_type++;
2314
2315 do {
2316 zone_type--;
2317 zone = pgdat->node_zones + zone_type;
2318 if (populated_zone(zone)) {
2319 zoneref_set_zone(zone,
2320 &zonelist->_zonerefs[nr_zones++]);
2321 check_highest_zone(zone_type);
2322 }
2323
2324 } while (zone_type);
2325 return nr_zones;
2326}
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338#define ZONELIST_ORDER_DEFAULT 0
2339#define ZONELIST_ORDER_NODE 1
2340#define ZONELIST_ORDER_ZONE 2
2341
2342
2343
2344
2345static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2346static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2347
2348
2349#ifdef CONFIG_NUMA
2350
2351static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2352
2353#define NUMA_ZONELIST_ORDER_LEN 16
2354char numa_zonelist_order[16] = "default";
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364static int __parse_numa_zonelist_order(char *s)
2365{
2366 if (*s == 'd' || *s == 'D') {
2367 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2368 } else if (*s == 'n' || *s == 'N') {
2369 user_zonelist_order = ZONELIST_ORDER_NODE;
2370 } else if (*s == 'z' || *s == 'Z') {
2371 user_zonelist_order = ZONELIST_ORDER_ZONE;
2372 } else {
2373 printk(KERN_WARNING
2374 "Ignoring invalid numa_zonelist_order value: "
2375 "%s\n", s);
2376 return -EINVAL;
2377 }
2378 return 0;
2379}
2380
2381static __init int setup_numa_zonelist_order(char *s)
2382{
2383 if (s)
2384 return __parse_numa_zonelist_order(s);
2385 return 0;
2386}
2387early_param("numa_zonelist_order", setup_numa_zonelist_order);
2388
2389
2390
2391
2392int numa_zonelist_order_handler(ctl_table *table, int write,
2393 void __user *buffer, size_t *length,
2394 loff_t *ppos)
2395{
2396 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2397 int ret;
2398
2399 if (write)
2400 strncpy(saved_string, (char*)table->data,
2401 NUMA_ZONELIST_ORDER_LEN);
2402 ret = proc_dostring(table, write, buffer, length, ppos);
2403 if (ret)
2404 return ret;
2405 if (write) {
2406 int oldval = user_zonelist_order;
2407 if (__parse_numa_zonelist_order((char*)table->data)) {
2408
2409
2410
2411 strncpy((char*)table->data, saved_string,
2412 NUMA_ZONELIST_ORDER_LEN);
2413 user_zonelist_order = oldval;
2414 } else if (oldval != user_zonelist_order)
2415 build_all_zonelists();
2416 }
2417 return 0;
2418}
2419
2420
2421#define MAX_NODE_LOAD (nr_online_nodes)
2422static int node_load[MAX_NUMNODES];
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438static int find_next_best_node(int node, nodemask_t *used_node_mask)
2439{
2440 int n, val;
2441 int min_val = INT_MAX;
2442 int best_node = -1;
2443 const struct cpumask *tmp = cpumask_of_node(0);
2444
2445
2446 if (!node_isset(node, *used_node_mask)) {
2447 node_set(node, *used_node_mask);
2448 return node;
2449 }
2450
2451 for_each_node_state(n, N_HIGH_MEMORY) {
2452
2453
2454 if (node_isset(n, *used_node_mask))
2455 continue;
2456
2457
2458 val = node_distance(node, n);
2459
2460
2461 val += (n < node);
2462
2463
2464 tmp = cpumask_of_node(n);
2465 if (!cpumask_empty(tmp))
2466 val += PENALTY_FOR_NODE_WITH_CPUS;
2467
2468
2469 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2470 val += node_load[n];
2471
2472 if (val < min_val) {
2473 min_val = val;
2474 best_node = n;
2475 }
2476 }
2477
2478 if (best_node >= 0)
2479 node_set(best_node, *used_node_mask);
2480
2481 return best_node;
2482}
2483
2484
2485
2486
2487
2488
2489
2490static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2491{
2492 int j;
2493 struct zonelist *zonelist;
2494
2495 zonelist = &pgdat->node_zonelists[0];
2496 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2497 ;
2498 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2499 MAX_NR_ZONES - 1);
2500 zonelist->_zonerefs[j].zone = NULL;
2501 zonelist->_zonerefs[j].zone_idx = 0;
2502}
2503
2504
2505
2506
2507static void build_thisnode_zonelists(pg_data_t *pgdat)
2508{
2509 int j;
2510 struct zonelist *zonelist;
2511
2512 zonelist = &pgdat->node_zonelists[1];
2513 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2514 zonelist->_zonerefs[j].zone = NULL;
2515 zonelist->_zonerefs[j].zone_idx = 0;
2516}
2517
2518
2519
2520
2521
2522
2523
2524static int node_order[MAX_NUMNODES];
2525
2526static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2527{
2528 int pos, j, node;
2529 int zone_type;
2530 struct zone *z;
2531 struct zonelist *zonelist;
2532
2533 zonelist = &pgdat->node_zonelists[0];
2534 pos = 0;
2535 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2536 for (j = 0; j < nr_nodes; j++) {
2537 node = node_order[j];
2538 z = &NODE_DATA(node)->node_zones[zone_type];
2539 if (populated_zone(z)) {
2540 zoneref_set_zone(z,
2541 &zonelist->_zonerefs[pos++]);
2542 check_highest_zone(zone_type);
2543 }
2544 }
2545 }
2546 zonelist->_zonerefs[pos].zone = NULL;
2547 zonelist->_zonerefs[pos].zone_idx = 0;
2548}
2549
2550static int default_zonelist_order(void)
2551{
2552 int nid, zone_type;
2553 unsigned long low_kmem_size,total_size;
2554 struct zone *z;
2555 int average_size;
2556
2557
2558
2559
2560
2561
2562
2563 low_kmem_size = 0;
2564 total_size = 0;
2565 for_each_online_node(nid) {
2566 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2567 z = &NODE_DATA(nid)->node_zones[zone_type];
2568 if (populated_zone(z)) {
2569 if (zone_type < ZONE_NORMAL)
2570 low_kmem_size += z->present_pages;
2571 total_size += z->present_pages;
2572 }
2573 }
2574 }
2575 if (!low_kmem_size ||
2576 low_kmem_size > total_size/2)
2577 return ZONELIST_ORDER_NODE;
2578
2579
2580
2581
2582
2583 average_size = total_size /
2584 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2585 for_each_online_node(nid) {
2586 low_kmem_size = 0;
2587 total_size = 0;
2588 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2589 z = &NODE_DATA(nid)->node_zones[zone_type];
2590 if (populated_zone(z)) {
2591 if (zone_type < ZONE_NORMAL)
2592 low_kmem_size += z->present_pages;
2593 total_size += z->present_pages;
2594 }
2595 }
2596 if (low_kmem_size &&
2597 total_size > average_size &&
2598 low_kmem_size > total_size * 70/100)
2599 return ZONELIST_ORDER_NODE;
2600 }
2601 return ZONELIST_ORDER_ZONE;
2602}
2603
2604static void set_zonelist_order(void)
2605{
2606 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2607 current_zonelist_order = default_zonelist_order();
2608 else
2609 current_zonelist_order = user_zonelist_order;
2610}
2611
2612static void build_zonelists(pg_data_t *pgdat)
2613{
2614 int j, node, load;
2615 enum zone_type i;
2616 nodemask_t used_mask;
2617 int local_node, prev_node;
2618 struct zonelist *zonelist;
2619 int order = current_zonelist_order;
2620
2621
2622 for (i = 0; i < MAX_ZONELISTS; i++) {
2623 zonelist = pgdat->node_zonelists + i;
2624 zonelist->_zonerefs[0].zone = NULL;
2625 zonelist->_zonerefs[0].zone_idx = 0;
2626 }
2627
2628
2629 local_node = pgdat->node_id;
2630 load = nr_online_nodes;
2631 prev_node = local_node;
2632 nodes_clear(used_mask);
2633
2634 memset(node_order, 0, sizeof(node_order));
2635 j = 0;
2636
2637 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (distance != node_distance(local_node, prev_node))
			node_load[node] = load;

		prev_node = node;
		load--;
2657 if (order == ZONELIST_ORDER_NODE)
2658 build_zonelists_in_node_order(pgdat, node);
2659 else
2660 node_order[j++] = node;
2661 }
2662
2663 if (order == ZONELIST_ORDER_ZONE) {
2664
2665 build_zonelists_in_zone_order(pgdat, j);
2666 }
2667
2668 build_thisnode_zonelists(pgdat);
2669}
2670
2671
2672static void build_zonelist_cache(pg_data_t *pgdat)
2673{
2674 struct zonelist *zonelist;
2675 struct zonelist_cache *zlc;
2676 struct zoneref *z;
2677
2678 zonelist = &pgdat->node_zonelists[0];
2679 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2680 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2681 for (z = zonelist->_zonerefs; z->zone; z++)
2682 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2683}
2684
2685
2686#else
2687
2688static void set_zonelist_order(void)
2689{
2690 current_zonelist_order = ZONELIST_ORDER_ZONE;
2691}
2692
2693static void build_zonelists(pg_data_t *pgdat)
2694{
2695 int node, local_node;
2696 enum zone_type j;
2697 struct zonelist *zonelist;
2698
2699 local_node = pgdat->node_id;
2700
2701 zonelist = &pgdat->node_zonelists[0];
2702 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2703
	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
2712 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2713 if (!node_online(node))
2714 continue;
2715 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2716 MAX_NR_ZONES - 1);
2717 }
2718 for (node = 0; node < local_node; node++) {
2719 if (!node_online(node))
2720 continue;
2721 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2722 MAX_NR_ZONES - 1);
2723 }
2724
2725 zonelist->_zonerefs[j].zone = NULL;
2726 zonelist->_zonerefs[j].zone_idx = 0;
2727}
2728
2729
2730static void build_zonelist_cache(pg_data_t *pgdat)
2731{
2732 pgdat->node_zonelists[0].zlcache_ptr = NULL;
2733}
2734
2735#endif
2736
2737
2738static int __build_all_zonelists(void *dummy)
2739{
2740 int nid;
2741
2742#ifdef CONFIG_NUMA
2743 memset(node_load, 0, sizeof(node_load));
2744#endif
2745 for_each_online_node(nid) {
2746 pg_data_t *pgdat = NODE_DATA(nid);
2747
2748 build_zonelists(pgdat);
2749 build_zonelist_cache(pgdat);
2750 }
2751 return 0;
2752}
2753
2754void build_all_zonelists(void)
2755{
2756 set_zonelist_order();
2757
2758 if (system_state == SYSTEM_BOOTING) {
2759 __build_all_zonelists(NULL);
2760 mminit_verify_zonelist();
2761 cpuset_init_current_mems_allowed();
	} else {
		/*
		 * We have to stop all cpus to guarantee there is no user
		 * of the zonelists while they are being rebuilt.
		 */
		stop_machine(__build_all_zonelists, NULL, NULL);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
2778 else
2779 page_group_by_mobility_disabled = 0;
2780
2781 printk("Built %i zonelists in %s order, mobility grouping %s. "
2782 "Total pages: %ld\n",
2783 nr_online_nodes,
2784 zonelist_order_name[current_zonelist_order],
2785 page_group_by_mobility_disabled ? "off" : "on",
2786 vm_total_pages);
2787#ifdef CONFIG_NUMA
2788 printk("Policy zone: %s\n", zone_names[policy_zone]);
2789#endif
2790}
2791
/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the wait table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256
2804
2805#ifndef CONFIG_MEMORY_HOTPLUG
2806static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2807{
2808 unsigned long size = 1;
2809
2810 pages /= PAGES_PER_WAITQUEUE;
2811
2812 while (size < pages)
2813 size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);
2821
2822 return max(size, 4UL);
2823}
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to
 * determine a suitable size for its wait_table in advance.  So we use
 * the maximum size now.
 *
 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
 *
 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 *
 * The maximum entries are prepared when a zone's memory is (512K + 256)
 * pages or more by the traditional way (see above), i.e.
 *
 *    threshold: 4096 * PAGES_PER_WAITQUEUE = 4096 * 256 = 1M pages =>
 *    wait_table_hash_nr_entries() = 4096
 *
 * Thus, we allocate the maximum number of entries here.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
2846#endif
2847
/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
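
/*
 * Worked example for the two helpers above (illustrative numbers): a
 * 512MiB zone with 4KiB pages spans 131072 pages; 131072 /
 * PAGES_PER_WAITQUEUE(256) = 512, already a power of two and within the
 * [4, 4096] clamp, so the table gets 512 entries.  wait_table_bits(512)
 * = ffz(~512) = 9, since 512 == 1 << 9.
 */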
2857
2858#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2859
/*
 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
 * of blocks reserved is based on min_wmark_pages(zone). The memory within
 * the reserve will tend to store contiguous free pages. Setting
 * min_free_kbytes higher will reserve more pageblocks, and the reserve
 * will be maintained until min_free_kbytes is lowered again.
 */
static void setup_zone_migrate_reserve(struct zone *zone)
{
	unsigned long start_pfn, pfn, end_pfn;
	struct page *page;
	unsigned long block_migratetype;
	int reserve;

	/* Get the start pfn, end pfn and the number of blocks to reserve */
	start_pfn = zone->zone_start_pfn;
	end_pfn = start_pfn + zone->spanned_pages;
	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
							pageblock_order;

	/*
	 * Reserve blocks are generally in place to help high-order atomic
	 * allocations that are short-lived. A min_free_kbytes value that
	 * would result in more than 2 reserve blocks is deemed to be
	 * over-reserving, so the reserve is capped at 2 pageblocks.
	 */
	reserve = min(2, reserve);
2888
2889 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2890 if (!pfn_valid(pfn))
2891 continue;
2892 page = pfn_to_page(pfn);

		/* Watch out for overlapping nodes */
		if (page_to_nid(page) != zone_to_nid(zone))
			continue;

		/* Blocks with reserved pages will never free, skip them */
		if (PageReserved(page))
			continue;

		block_migratetype = get_pageblock_migratetype(page);

		/* If this block is reserved, account for it */
		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
			reserve--;
			continue;
		}

		/* Suitable for reserving if this block is movable */
		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
			set_pageblock_migratetype(page, MIGRATE_RESERVE);
			move_freepages_block(zone, page, MIGRATE_RESERVE);
			reserve--;
			continue;
		}

		/*
		 * If the reserve is met and this is a previous reserved block,
		 * take it back
		 */
2922 if (block_migratetype == MIGRATE_RESERVE) {
2923 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2924 move_freepages_block(zone, page, MIGRATE_MOVABLE);
2925 }
2926 }
2927}
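
/*
 * Sizing sketch for setup_zone_migrate_reserve() (assumed values): with
 * pageblock_order == 10 (1024-page blocks), a zone whose min watermark is
 * 1000 pages rounds up to a single reserve block, while a min watermark
 * of 5000 pages would ask for five blocks and be capped at two by the
 * min(2, reserve) above.
 */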
2928
/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context)
2936{
2937 struct page *page;
2938 unsigned long end_pfn = start_pfn + size;
2939 unsigned long pfn;
2940 struct zone *z;
2941
2942 if (highest_memmap_pfn < end_pfn - 1)
2943 highest_memmap_pfn = end_pfn - 1;
2944
2945 z = &NODE_DATA(nid)->node_zones[zone];
2946 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s
		 * handed to this function.  They do not
		 * exist on hotplugged memory.
		 */
		if (context == MEMMAP_EARLY) {
2953 if (!early_pfn_valid(pfn))
2954 continue;
2955 if (!early_pfn_in_nid(pfn, nid))
2956 continue;
2957 }
2958 page = pfn_to_page(pfn);
2959 set_page_links(page, zone, nid, pfn);
2960 mminit_verify_page_links(page, zone, nid, pfn);
2961 init_page_count(page);
2962 reset_page_mapcount(page);
2963 SetPageReserved(page);

		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made. Later some blocks near
		 * the start are marked MIGRATE_RESERVE by
		 * setup_zone_migrate_reserve().
		 *
		 * The bitmap is created for the zone's valid pfn range,
		 * but a memmap can be created for invalid pages (for
		 * alignment). Check here so that we do not call
		 * set_pageblock_migratetype() against a pfn that lies
		 * outside the zone.
		 */
		if ((z->zone_start_pfn <= pfn)
		    && (pfn < z->zone_start_pfn + z->spanned_pages)
		    && !(pfn & (pageblock_nr_pages - 1)))
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2982
2983 INIT_LIST_HEAD(&page->lru);
2984#ifdef WANT_PAGE_VIRTUAL
2985
2986 if (!is_highmem_idx(zone))
2987 set_page_address(page, __va(pfn << PAGE_SHIFT));
2988#endif
2989 }
2990}
2991
2992static void __meminit zone_init_free_lists(struct zone *zone)
2993{
2994 int order, t;
2995 for_each_migratetype_order(order, t) {
2996 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2997 zone->free_area[order].nr_free = 0;
2998 }
2999}
3000
3001#ifndef __HAVE_ARCH_MEMMAP_INIT
3002#define memmap_init(size, nid, zone, start_pfn) \
3003 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3004#endif
3005
3006static int zone_batchsize(struct zone *zone)
3007{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.  But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;
3023
	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3035
3036 return batch;
3037
#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
3054}
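
/*
 * Worked example for zone_batchsize() on CONFIG_MMU (assumed 4KiB pages):
 * a 1GiB zone has present_pages = 262144, so batch = 262144/1024 = 256;
 * 256 * 4096 bytes exceeds 512KiB, so batch is clamped to 128, then
 * batch /= 4 gives 32; finally rounddown_pow_of_two(32 + 16) - 1 = 31.
 */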
3055
3056static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3057{
3058 struct per_cpu_pages *pcp;
3059 int migratetype;
3060
3061 memset(p, 0, sizeof(*p));
3062
3063 pcp = &p->pcp;
3064 pcp->count = 0;
3065 pcp->high = 6 * batch;
3066 pcp->batch = max(1UL, 1 * batch);
3067 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3068 INIT_LIST_HEAD(&pcp->lists[migratetype]);
3069}
3070
/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
3078{
3079 struct per_cpu_pages *pcp;
3080
3081 pcp = &p->pcp;
3082 pcp->high = high;
3083 pcp->batch = max(1UL, high/4);
3084 if ((high/4) > (PAGE_SHIFT * 8))
3085 pcp->batch = PAGE_SHIFT * 8;
3086}
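
/*
 * Worked example for setup_pagelist_highmark() (assumed 4KiB pages, so
 * PAGE_SHIFT == 12): high = 400 gives batch = max(1, 100) = 100, but
 * 100 > PAGE_SHIFT * 8 == 96, so batch is clamped to 96.
 */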
3087
3088
#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
3113static int __cpuinit process_zones(int cpu)
3114{
3115 struct zone *zone, *dzone;
3116 int node = cpu_to_node(cpu);
3117
3118 node_set_state(node, N_CPU);
3119
3120 for_each_populated_zone(zone) {
3121 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3122 GFP_KERNEL, node);
3123 if (!zone_pcp(zone, cpu))
3124 goto bad;
3125
3126 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3127
3128 if (percpu_pagelist_fraction)
3129 setup_pagelist_highmark(zone_pcp(zone, cpu),
3130 (zone->present_pages / percpu_pagelist_fraction));
3131 }
3132
3133 return 0;
3134bad:
3135 for_each_zone(dzone) {
3136 if (!populated_zone(dzone))
3137 continue;
3138 if (dzone == zone)
3139 break;
3140 kfree(zone_pcp(dzone, cpu));
3141 zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3142 }
3143 return -ENOMEM;
3144}
3145
3146static inline void free_zone_pagesets(int cpu)
3147{
3148 struct zone *zone;
3149
3150 for_each_zone(zone) {
3151 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
		/* Free per_cpu_pageset if it is slab allocated */
		if (pset != &boot_pageset[cpu])
			kfree(pset);
3156 zone_pcp(zone, cpu) = &boot_pageset[cpu];
3157 }
3158}
3159
3160static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3161 unsigned long action,
3162 void *hcpu)
3163{
3164 int cpu = (long)hcpu;
3165 int ret = NOTIFY_OK;
3166
3167 switch (action) {
3168 case CPU_UP_PREPARE:
3169 case CPU_UP_PREPARE_FROZEN:
3170 if (process_zones(cpu))
3171 ret = NOTIFY_BAD;
3172 break;
3173 case CPU_UP_CANCELED:
3174 case CPU_UP_CANCELED_FROZEN:
3175 case CPU_DEAD:
3176 case CPU_DEAD_FROZEN:
3177 free_zone_pagesets(cpu);
3178 break;
3179 default:
3180 break;
3181 }
3182 return ret;
3183}
3184
3185static struct notifier_block __cpuinitdata pageset_notifier =
3186 { &pageset_cpuup_callback, NULL, 0 };
3187
3188void __init setup_per_cpu_pageset(void)
3189{
3190 int err;

	/*
	 * Initialize per_cpu_pageset for the boot cpu.
	 * A cpuup callback will do this for every cpu
	 * as it comes online.
	 */
	err = process_zones(smp_processor_id());
3197 BUG_ON(err);
3198 register_cpu_notifier(&pageset_notifier);
3199}
3200
3201#endif
3202
3203static noinline __init_refok
3204int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3205{
3206 int i;
3207 struct pglist_data *pgdat = zone->zone_pgdat;
3208 size_t alloc_size;
3209
3210
3211
3212
3213
3214 zone->wait_table_hash_nr_entries =
3215 wait_table_hash_nr_entries(zone_size_pages);
3216 zone->wait_table_bits =
3217 wait_table_bits(zone->wait_table_hash_nr_entries);
3218 alloc_size = zone->wait_table_hash_nr_entries
3219 * sizeof(wait_queue_head_t);
3220
3221 if (!slab_is_available()) {
3222 zone->wait_table = (wait_queue_head_t *)
3223 alloc_bootmem_node(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = vmalloc(alloc_size);
	}
3237 if (!zone->wait_table)
3238 return -ENOMEM;
3239
	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);
3242
3243 return 0;
3244}
3245
3246static int __zone_pcp_update(void *data)
3247{
3248 struct zone *zone = data;
3249 int cpu;
3250 unsigned long batch = zone_batchsize(zone), flags;
3251
3252 for (cpu = 0; cpu < NR_CPUS; cpu++) {
3253 struct per_cpu_pageset *pset;
3254 struct per_cpu_pages *pcp;
3255
3256 pset = zone_pcp(zone, cpu);
3257 pcp = &pset->pcp;
3258
3259 local_irq_save(flags);
3260 free_pcppages_bulk(zone, pcp->count, pcp);
3261 setup_pageset(pset, batch);
3262 local_irq_restore(flags);
3263 }
3264 return 0;
3265}
3266
3267void zone_pcp_update(struct zone *zone)
3268{
3269 stop_machine(__zone_pcp_update, zone, NULL);
3270}
3271
3272static __meminit void zone_pcp_init(struct zone *zone)
3273{
3274 int cpu;
3275 unsigned long batch = zone_batchsize(zone);
3276
3277 for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu], 0);
#else
		setup_pageset(zone_pcp(zone, cpu), batch);
3284#endif
3285 }
3286 if (zone->present_pages)
3287 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
3288 zone->name, zone->present_pages, batch);
3289}
3290
3291__meminit int init_currently_empty_zone(struct zone *zone,
3292 unsigned long zone_start_pfn,
3293 unsigned long size,
3294 enum memmap_context context)
3295{
3296 struct pglist_data *pgdat = zone->zone_pgdat;
3297 int ret;
3298 ret = zone_wait_table_init(zone, size);
3299 if (ret)
3300 return ret;
3301 pgdat->nr_zones = zone_idx(zone) + 1;
3302
3303 zone->zone_start_pfn = zone_start_pfn;
3304
3305 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3306 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3307 pgdat->node_id,
3308 (unsigned long)zone_idx(zone),
3309 zone_start_pfn, (zone_start_pfn + size));
3310
3311 zone_init_free_lists(zone);
3312
3313 return 0;
3314}
3315
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns first region regardless of node
 */
static int __meminit first_active_region_index_in_nid(int nid)
3322{
3323 int i;
3324
3325 for (i = 0; i < nr_nodemap_entries; i++)
3326 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3327 return i;
3328
3329 return -1;
3330}
3331
/*
 * Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
static int __meminit next_active_region_index_in_nid(int index, int nid)
3337{
3338 for (index = index + 1; index < nr_nodemap_entries; index++)
3339 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3340 return index;
3341
3342 return -1;
3343}
3344
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version, but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn)
3353{
3354 int i;
3355
3356 for (i = 0; i < nr_nodemap_entries; i++) {
3357 unsigned long start_pfn = early_node_map[i].start_pfn;
3358 unsigned long end_pfn = early_node_map[i].end_pfn;
3359
3360 if (start_pfn <= pfn && pfn < end_pfn)
3361 return early_node_map[i].nid;
3362 }
3363
3364 return -1;
3365}
3366#endif
3367
3368int __meminit early_pfn_to_nid(unsigned long pfn)
3369{
3370 int nid;
3371
3372 nid = __early_pfn_to_nid(pfn);
3373 if (nid >= 0)
3374 return nid;
3375
3376 return 0;
3377}
3378
3379#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3380bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3381{
3382 int nid;
3383
3384 nid = __early_pfn_to_nid(pfn);
3385 if (nid >= 0 && nid != node)
3386 return false;
3387 return true;
3388}
3389#endif
3390
3391
3392#define for_each_active_range_index_in_nid(i, nid) \
3393 for (i = first_active_region_index_in_nid(nid); i != -1; \
3394 i = next_active_region_index_in_nid(i, nid))
3395
/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn)
3407{
3408 int i;
3409
3410 for_each_active_range_index_in_nid(i, nid) {
3411 unsigned long size_pages = 0;
3412 unsigned long end_pfn = early_node_map[i].end_pfn;
3413
3414 if (early_node_map[i].start_pfn >= max_low_pfn)
3415 continue;
3416
3417 if (end_pfn > max_low_pfn)
3418 end_pfn = max_low_pfn;
3419
3420 size_pages = end_pfn - early_node_map[i].start_pfn;
3421 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3422 PFN_PHYS(early_node_map[i].start_pfn),
3423 size_pages << PAGE_SHIFT);
3424 }
3425}
3426
3427void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3428{
3429 int i;
3430 int ret;
3431
3432 for_each_active_range_index_in_nid(i, nid) {
3433 ret = work_fn(early_node_map[i].start_pfn,
3434 early_node_map[i].end_pfn, data);
3435 if (ret)
3436 break;
3437 }
3438}
3439
/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
3448{
3449 int i;
3450
3451 for_each_active_range_index_in_nid(i, nid)
3452 memory_present(early_node_map[i].nid,
3453 early_node_map[i].start_pfn,
3454 early_node_map[i].end_pfn);
3455}
3456
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frames of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
3470{
3471 int i;
3472 *start_pfn = -1UL;
3473 *end_pfn = 0;
3474
3475 for_each_active_range_index_in_nid(i, nid) {
3476 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3477 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3478 }
3479
3480 if (*start_pfn == -1UL)
3481 *start_pfn = 0;
3482}
3483
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
3490{
3491 int zone_index;
3492 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3493 if (zone_index == ZONE_MOVABLE)
3494 continue;
3495
3496 if (arch_zone_highest_possible_pfn[zone_index] >
3497 arch_zone_lowest_possible_pfn[zone_index])
3498 break;
3499 }
3500
3501 VM_BUG_ON(zone_index == -1);
3502 movable_zone = zone_index;
3503}
3504
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * for each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are ordered in monotonic increasing memory addresses
 * so that the "highest" populated zone is used.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
3516 unsigned long zone_type,
3517 unsigned long node_start_pfn,
3518 unsigned long node_end_pfn,
3519 unsigned long *zone_start_pfn,
3520 unsigned long *zone_end_pfn)
3521{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
3539}
3540
/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
3548{
3549 unsigned long node_start_pfn, node_end_pfn;
3550 unsigned long zone_start_pfn, zone_end_pfn;
3551
3552
3553 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3554 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3555 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3556 adjust_zone_range_for_zone_movable(nid, zone_type,
3557 node_start_pfn, node_end_pfn,
3558 &zone_start_pfn, &zone_end_pfn);
3559
3560
3561 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3562 return 0;
3563
3564
3565 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3566 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3567
3568
3569 return zone_end_pfn - zone_start_pfn;
3570}
3571
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
static unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
3579{
3580 int i = 0;
3581 unsigned long prev_end_pfn = 0, hole_pages = 0;
3582 unsigned long start_pfn;
3583
3584
3585 i = first_active_region_index_in_nid(nid);
3586 if (i == -1)
3587 return 0;
3588
3589 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3590
3591
3592 if (early_node_map[i].start_pfn > range_start_pfn)
3593 hole_pages = prev_end_pfn - range_start_pfn;
3594
3595
3596 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3597
3598
3599 if (prev_end_pfn >= range_end_pfn)
3600 break;
3601
3602
3603 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3604 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3605
3606
3607 if (start_pfn > range_start_pfn) {
3608 BUG_ON(prev_end_pfn > start_pfn);
3609 hole_pages += start_pfn - prev_end_pfn;
3610 }
3611 prev_end_pfn = early_node_map[i].end_pfn;
3612 }
3613
3614
3615 if (range_end_pfn > prev_end_pfn)
3616 hole_pages += range_end_pfn -
3617 max(range_start_pfn, prev_end_pfn);
3618
3619 return hole_pages;
3620}
3621
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
3631{
3632 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3633}
3634
3635
3636static unsigned long __meminit zone_absent_pages_in_node(int nid,
3637 unsigned long zone_type,
3638 unsigned long *ignored)
3639{
3640 unsigned long node_start_pfn, node_end_pfn;
3641 unsigned long zone_start_pfn, zone_end_pfn;
3642
3643 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3644 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3645 node_start_pfn);
3646 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3647 node_end_pfn);
3648
3649 adjust_zone_range_for_zone_movable(nid, zone_type,
3650 node_start_pfn, node_end_pfn,
3651 &zone_start_pfn, &zone_end_pfn);
3652 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3653}
3654
3655#else
3656static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3657 unsigned long zone_type,
3658 unsigned long *zones_size)
3659{
3660 return zones_size[zone_type];
3661}
3662
3663static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3664 unsigned long zone_type,
3665 unsigned long *zholes_size)
3666{
3667 if (!zholes_size)
3668 return 0;
3669
3670 return zholes_size[zone_type];
3671}
3672
3673#endif
3674
3675static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3676 unsigned long *zones_size, unsigned long *zholes_size)
3677{
3678 unsigned long realtotalpages, totalpages = 0;
3679 enum zone_type i;
3680
3681 for (i = 0; i < MAX_NR_ZONES; i++)
3682 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3683 zones_size);
3684 pgdat->node_spanned_pages = totalpages;
3685
3686 realtotalpages = totalpages;
3687 for (i = 0; i < MAX_NR_ZONES; i++)
3688 realtotalpages -=
3689 zone_absent_pages_in_node(pgdat->node_id, i,
3690 zholes_size);
3691 pgdat->node_present_pages = realtotalpages;
3692 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3693 realtotalpages);
3694}
3695
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_order by
 * rounding up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock,
 * finally round what is now in bits to the nearest long in bits, then
 * return it in bytes.
 */
static unsigned long __init usemap_size(unsigned long zonesize)
3705{
3706 unsigned long usemapsize;
3707
3708 usemapsize = roundup(zonesize, pageblock_nr_pages);
3709 usemapsize = usemapsize >> pageblock_order;
3710 usemapsize *= NR_PAGEBLOCK_BITS;
3711 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3712
3713 return usemapsize / 8;
3714}
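
/*
 * Worked example for usemap_size() (assumed values): for a zone of
 * 1048576 pages with pageblock_order == 10 and, as assumed here,
 * NR_PAGEBLOCK_BITS == 3, there are 1024 pageblocks needing 3072 bits;
 * 3072 is already a multiple of 64 bits, so the usemap is
 * 3072 / 8 = 384 bytes.
 */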
3715
3716static void __init setup_usemap(struct pglist_data *pgdat,
3717 struct zone *zone, unsigned long zonesize)
3718{
3719 unsigned long usemapsize = usemap_size(zonesize);
3720 zone->pageblock_flags = NULL;
3721 if (usemapsize)
3722 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3723}
3724#else
static inline void setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize) {}
3727#endif
3728
3729#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3730
3731
3732static inline int pageblock_default_order(void)
3733{
3734 if (HPAGE_SHIFT > PAGE_SHIFT)
3735 return HUGETLB_PAGE_ORDER;
3736
3737 return MAX_ORDER-1;
3738}
3739

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
static inline void __init set_pageblock_order(unsigned int order)
{
	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * and pageblock_default_order() are unused as pageblock_order is set
 * at compile-time. See include/linux/pageblock-flags.h for the values of
 * pageblock_order based on the kernel config.
 */
static inline int pageblock_default_order(unsigned int order)
{
	return MAX_ORDER-1;
}
3765#define set_pageblock_order(x) do {} while (0)
3766
3767#endif
3768
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
3777{
3778 enum zone_type j;
3779 int nid = pgdat->node_id;
3780 unsigned long zone_start_pfn = pgdat->node_start_pfn;
3781 int ret;
3782
3783 pgdat_resize_init(pgdat);
3784 pgdat->nr_zones = 0;
3785 init_waitqueue_head(&pgdat->kswapd_wait);
3786 pgdat->kswapd_max_order = 0;
3787 pgdat_page_cgroup_init(pgdat);
3788
3789 for (j = 0; j < MAX_NR_ZONES; j++) {
3790 struct zone *zone = pgdat->node_zones + j;
3791 unsigned long size, realsize, memmap_pages;
3792 enum lru_list l;
3793
3794 size = zone_spanned_pages_in_node(nid, j, zones_size);
3795 realsize = size - zone_absent_pages_in_node(nid, j,
3796 zholes_size);
3797
		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations.
		 */
		memmap_pages =
			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3805 if (realsize >= memmap_pages) {
3806 realsize -= memmap_pages;
3807 if (memmap_pages)
3808 printk(KERN_DEBUG
3809 " %s zone: %lu pages used for memmap\n",
3810 zone_names[j], memmap_pages);
3811 } else
3812 printk(KERN_WARNING
3813 " %s zone: %lu pages exceeds realsize %lu\n",
3814 zone_names[j], memmap_pages, realsize);
3815
3816
3817 if (j == 0 && realsize > dma_reserve) {
3818 realsize -= dma_reserve;
3819 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3820 zone_names[0], dma_reserve);
3821 }
3822
3823 if (!is_highmem_idx(j))
3824 nr_kernel_pages += realsize;
3825 nr_all_pages += realsize;
3826
3827 zone->spanned_pages = size;
3828 zone->present_pages = realsize;
3829#ifdef CONFIG_NUMA
3830 zone->node = nid;
3831 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3832 / 100;
3833 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3834#endif
3835 zone->name = zone_names[j];
3836 spin_lock_init(&zone->lock);
3837 spin_lock_init(&zone->lru_lock);
3838 zone_seqlock_init(zone);
3839 zone->zone_pgdat = pgdat;
3840
3841 zone->prev_priority = DEF_PRIORITY;
3842
3843 zone_pcp_init(zone);
3844 for_each_lru(l) {
3845 INIT_LIST_HEAD(&zone->lru[l].list);
3846 zone->reclaim_stat.nr_saved_scan[l] = 0;
3847 }
3848 zone->reclaim_stat.recent_rotated[0] = 0;
3849 zone->reclaim_stat.recent_rotated[1] = 0;
3850 zone->reclaim_stat.recent_scanned[0] = 0;
3851 zone->reclaim_stat.recent_scanned[1] = 0;
3852 zap_zone_vm_stats(zone);
3853 zone->flags = 0;
3854 if (!size)
3855 continue;
3856
3857 set_pageblock_order(pageblock_default_order());
3858 setup_usemap(pgdat, zone, size);
3859 ret = init_currently_empty_zone(zone, zone_start_pfn,
3860 size, MEMMAP_EARLY);
3861 BUG_ON(ret);
3862 memmap_init(size, nid, j, zone_start_pfn);
3863 zone_start_pfn += size;
3864 }
3865}
3866
3867static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3868{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
3876 unsigned long size, start, end;
3877 struct page *map;
3878
		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
3884 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3885 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3886 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3887 size = (end - start) * sizeof(struct page);
3888 map = alloc_remap(pgdat->node_id, size);
3889 if (!map)
3890 map = alloc_bootmem_node(pgdat, size);
3891 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3892 }
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's.
	 */
	if (pgdat == NODE_DATA(0)) {
3898 mem_map = NODE_DATA(0)->node_mem_map;
3899#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3900 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3901 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3902#endif
3903 }
3904#endif
3905#endif
3906}
3907
3908void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3909 unsigned long node_start_pfn, unsigned long *zholes_size)
3910{
3911 pg_data_t *pgdat = NODE_DATA(nid);
3912
3913 pgdat->node_id = nid;
3914 pgdat->node_start_pfn = node_start_pfn;
3915 calculate_node_totalpages(pgdat, zones_size, zholes_size);
3916
3917 alloc_node_mem_map(pgdat);
3918#ifdef CONFIG_FLAT_NODE_MEM_MAP
3919 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3920 nid, (unsigned long)pgdat,
3921 (unsigned long)pgdat->node_mem_map);
3922#endif
3923
3924 free_area_init_core(pgdat, zones_size, zholes_size);
3925}
3926
3927#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3928
#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
static void __init setup_nr_node_ids(void)
3934{
3935 unsigned int node;
3936 unsigned int highest = 0;
3937
3938 for_each_node_mask(node, node_possible_map)
3939 highest = node;
3940 nr_node_ids = highest + 1;
3941}
3942#else
3943static inline void setup_nr_node_ids(void)
3944{
3945}
3946#endif
3947
/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
3962{
3963 int i;
3964
3965 mminit_dprintk(MMINIT_TRACE, "memory_register",
3966 "Entering add_active_range(%d, %#lx, %#lx) "
3967 "%d entries of %d used\n",
3968 nid, start_pfn, end_pfn,
3969 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3970
3971 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3972
	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].end_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}
3997
3998
3999 if (i >= MAX_ACTIVE_REGIONS) {
4000 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4001 MAX_ACTIVE_REGIONS);
4002 return;
4003 }
4004
4005 early_node_map[i].nid = nid;
4006 early_node_map[i].start_pfn = start_pfn;
4007 early_node_map[i].end_pfn = end_pfn;
4008 nr_nodemap_entries = i + 1;
4009}
4010
/**
 * remove_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @start_pfn: The new PFN of the range
 * @end_pfn: The new PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 * The map is kept near the end of the physical page range that has already
 * been registered. This function allows an arch to shrink an existing
 * registered range.
 */
void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
4024{
4025 int i, j;
4026 int removed = 0;
4027
4028 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4029 nid, start_pfn, end_pfn);
4030
4031
4032 for_each_active_range_index_in_nid(i, nid) {
4033 if (early_node_map[i].start_pfn >= start_pfn &&
4034 early_node_map[i].end_pfn <= end_pfn) {
4035
4036 early_node_map[i].start_pfn = 0;
4037 early_node_map[i].end_pfn = 0;
4038 removed = 1;
4039 continue;
4040 }
4041 if (early_node_map[i].start_pfn < start_pfn &&
4042 early_node_map[i].end_pfn > start_pfn) {
4043 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4044 early_node_map[i].end_pfn = start_pfn;
4045 if (temp_end_pfn > end_pfn)
4046 add_active_range(nid, end_pfn, temp_end_pfn);
4047 continue;
4048 }
4049 if (early_node_map[i].start_pfn >= start_pfn &&
4050 early_node_map[i].end_pfn > end_pfn &&
4051 early_node_map[i].start_pfn < end_pfn) {
4052 early_node_map[i].start_pfn = end_pfn;
4053 continue;
4054 }
4055 }
4056
4057 if (!removed)
4058 return;
4059
4060
4061 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4062 if (early_node_map[i].nid != nid)
4063 continue;
4064 if (early_node_map[i].end_pfn)
4065 continue;
4066
4067 for (j = i; j < nr_nodemap_entries - 1; j++)
4068 memcpy(&early_node_map[j], &early_node_map[j+1],
4069 sizeof(early_node_map[j]));
4070 j = nr_nodemap_entries - 1;
4071 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4072 nr_nodemap_entries--;
4073 }
4074}
4075
/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
void __init remove_all_active_ranges(void)
4084{
4085 memset(early_node_map, 0, sizeof(early_node_map));
4086 nr_nodemap_entries = 0;
4087}
4088
4089
4090static int __init cmp_node_active_region(const void *a, const void *b)
4091{
4092 struct node_active_region *arange = (struct node_active_region *)a;
4093 struct node_active_region *brange = (struct node_active_region *)b;
4094
4095
4096 if (arange->start_pfn > brange->start_pfn)
4097 return 1;
4098 if (arange->start_pfn < brange->start_pfn)
4099 return -1;
4100
4101 return 0;
4102}
4103
4104
4105static void __init sort_node_map(void)
4106{
4107 sort(early_node_map, (size_t)nr_nodemap_entries,
4108 sizeof(struct node_active_region),
4109 cmp_node_active_region, NULL);
4110}
4111
4112
4113static unsigned long __init find_min_pfn_for_node(int nid)
4114{
4115 int i;
4116 unsigned long min_pfn = ULONG_MAX;
4117
4118
4119 for_each_active_range_index_in_nid(i, nid)
4120 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4121
4122 if (min_pfn == ULONG_MAX) {
4123 printk(KERN_WARNING
4124 "Could not find start_pfn for node %d\n", nid);
4125 return 0;
4126 }
4127
4128 return min_pfn;
4129}
4130
/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
4138{
4139 return find_min_pfn_for_node(MAX_NUMNODES);
4140}
4141
/*
 * early_calculate_totalpages()
 * Sums the pages in active regions for the movable zone and populates
 * N_HIGH_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
4148{
4149 int i;
4150 unsigned long totalpages = 0;
4151
4152 for (i = 0; i < nr_nodemap_entries; i++) {
4153 unsigned long pages = early_node_map[i].end_pfn -
4154 early_node_map[i].start_pfn;
4155 totalpages += pages;
4156 if (pages)
4157 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4158 }
4159 return totalpages;
4160}
4161
/*
 * Find the PFN the Movable zone begins at in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4169{
4170 int i, nid;
4171 unsigned long usable_startpfn;
4172 unsigned long kernelcore_node, kernelcore_remaining;
4173
4174 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4175 unsigned long totalpages = early_calculate_totalpages();
4176 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4177
	/*
	 * If movablecore was specified, calculate what size of
	 * kernelcore that corresponds to so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user.
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}
4199

	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	find_usable_zone_for_movable();
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
4211 for_each_node_state(nid, N_HIGH_MEMORY) {
		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel.
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE.
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
4228 for_each_active_range_index_in_nid(i, nid) {
4229 unsigned long start_pfn, end_pfn;
4230 unsigned long size_pages;
4231
4232 start_pfn = max(early_node_map[i].start_pfn,
4233 zone_movable_pfn[nid]);
4234 end_pfn = early_node_map[i].end_pfn;
4235 if (start_pfn >= end_pfn)
4236 continue;
4237
4238
4239 if (start_pfn < usable_startpfn) {
4240 unsigned long kernel_pages;
4241 kernel_pages = min(end_pfn, usable_startpfn)
4242 - start_pfn;
4243
4244 kernelcore_remaining -= min(kernel_pages,
4245 kernelcore_remaining);
4246 required_kernelcore -= min(kernel_pages,
4247 required_kernelcore);
4248
4249
4250 if (end_pfn <= usable_startpfn) {
4251
4252
4253
4254
4255
4256
4257
4258 zone_movable_pfn[nid] = end_pfn;
4259 continue;
4260 }
4261 start_pfn = usable_startpfn;
4262 }
4263
4264
4265
4266
4267
4268
4269 size_pages = end_pfn - start_pfn;
4270 if (size_pages > kernelcore_remaining)
4271 size_pages = kernelcore_remaining;
4272 zone_movable_pfn[nid] = start_pfn + size_pages;
4273
4274
4275
4276
4277
4278
4279 required_kernelcore -= min(required_kernelcore,
4280 size_pages);
4281 kernelcore_remaining -= size_pages;
4282 if (!kernelcore_remaining)
4283 break;
4284 }
4285 }
4286
	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied.
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_HIGH_MEMORY] = saved_node_state;
4305}
4306
/* Any regular memory on that node? */
static void check_for_regular_memory(pg_data_t *pgdat)
4309{
4310#ifdef CONFIG_HIGHMEM
4311 enum zone_type zone_type;
4312
4313 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4314 struct zone *zone = &pgdat->node_zones[zone_type];
4315 if (zone->present_pages)
4316 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4317 }
4318#endif
4319}
4320
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4335{
4336 unsigned long nid;
4337 int i;
4338
4339
4340 sort_node_map();
4341
4342
4343 memset(arch_zone_lowest_possible_pfn, 0,
4344 sizeof(arch_zone_lowest_possible_pfn));
4345 memset(arch_zone_highest_possible_pfn, 0,
4346 sizeof(arch_zone_highest_possible_pfn));
4347 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4348 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4349 for (i = 1; i < MAX_NR_ZONES; i++) {
4350 if (i == ZONE_MOVABLE)
4351 continue;
4352 arch_zone_lowest_possible_pfn[i] =
4353 arch_zone_highest_possible_pfn[i-1];
4354 arch_zone_highest_possible_pfn[i] =
4355 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4356 }
4357 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4358 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4359
4360
4361 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4362 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4363
4364
4365 printk("Zone PFN ranges:\n");
4366 for (i = 0; i < MAX_NR_ZONES; i++) {
4367 if (i == ZONE_MOVABLE)
4368 continue;
4369 printk(" %-8s %0#10lx -> %0#10lx\n",
4370 zone_names[i],
4371 arch_zone_lowest_possible_pfn[i],
4372 arch_zone_highest_possible_pfn[i]);
4373 }
4374
4375
4376 printk("Movable zone start PFN for each node\n");
4377 for (i = 0; i < MAX_NUMNODES; i++) {
4378 if (zone_movable_pfn[i])
4379 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4380 }
4381
4382
4383 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4384 for (i = 0; i < nr_nodemap_entries; i++)
4385 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4386 early_node_map[i].start_pfn,
4387 early_node_map[i].end_pfn);
4388
4389
4390 mminit_verify_pageflags_layout();
4391 setup_nr_node_ids();
4392 for_each_online_node(nid) {
4393 pg_data_t *pgdat = NODE_DATA(nid);
4394 free_area_init_node(nid, NULL,
4395 find_min_pfn_for_node(nid), NULL);
4396
4397
4398 if (pgdat->node_present_pages)
4399 node_set_state(nid, N_HIGH_MEMORY);
4400 check_for_regular_memory(pgdat);
4401 }
4402}
4403
4404static int __init cmdline_parse_core(char *p, unsigned long *core)
4405{
4406 unsigned long long coremem;
4407 if (!p)
4408 return -EINVAL;
4409
4410 coremem = memparse(p, &p);
4411 *core = coremem >> PAGE_SHIFT;

	/* Paranoid check that UL is enough for the coremem value */
	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4415
4416 return 0;
4417}
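
/*
 * Example (hypothetical command line): booting with "kernelcore=512M"
 * makes memparse() return 536870912, so required_kernelcore becomes
 * 536870912 >> PAGE_SHIFT = 131072 pages with 4KiB pages.
 */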
4418
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
4424{
4425 return cmdline_parse_core(p, &required_kernelcore);
4426}
4427
/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
4433{
4434 return cmdline_parse_core(p, &required_movablecore);
4435}
4436
4437early_param("kernelcore", cmdline_parse_kernelcore);
4438early_param("movablecore", cmdline_parse_movablecore);
4439
4440#endif
4441
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
4454{
4455 dma_reserve = new_dma_reserve;
4456}
4457
4458#ifndef CONFIG_NEED_MULTIPLE_NODES
4459struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4460EXPORT_SYMBOL(contig_page_data);
4461#endif
4462
4463void __init free_area_init(unsigned long *zones_size)
4464{
4465 free_area_init_node(0, zones_size,
4466 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4467}
4468
4469static int page_alloc_cpu_notify(struct notifier_block *self,
4470 unsigned long action, void *hcpu)
4471{
4472 int cpu = (unsigned long)hcpu;
4473
4474 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4475 drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processor's event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		refresh_cpu_vm_stats(cpu);
4493 }
4494 return NOTIFY_OK;
4495}
4496
4497void __init page_alloc_init(void)
4498{
4499 hotcpu_notifier(page_alloc_cpu_notify, 0);
4500}
4501
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
4507{
4508 struct pglist_data *pgdat;
4509 unsigned long reserve_pages = 0;
4510 enum zone_type i, j;
4511
4512 for_each_online_pgdat(pgdat) {
4513 for (i = 0; i < MAX_NR_ZONES; i++) {
4514 struct zone *zone = pgdat->node_zones + i;
4515 unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);
4525
4526 if (max > zone->present_pages)
4527 max = zone->present_pages;
4528 reserve_pages += max;
4529 }
4530 }
4531 totalreserve_pages = reserve_pages;
4532}
4533
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
4541{
4542 struct pglist_data *pgdat;
4543 enum zone_type j, idx;
4544
4545 for_each_online_pgdat(pgdat) {
4546 for (j = 0; j < MAX_NR_ZONES; j++) {
4547 struct zone *zone = pgdat->node_zones + j;
4548 unsigned long present_pages = zone->present_pages;
4549
4550 zone->lowmem_reserve[j] = 0;
4551
4552 idx = j;
4553 while (idx) {
4554 struct zone *lower_zone;
4555
4556 idx--;
4557
4558 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4559 sysctl_lowmem_reserve_ratio[idx] = 1;
4560
4561 lower_zone = pgdat->node_zones + idx;
4562 lower_zone->lowmem_reserve[j] = present_pages /
4563 sysctl_lowmem_reserve_ratio[idx];
4564 present_pages += lower_zone->present_pages;
4565 }
4566 }
4567 }
4568
4569
4570 calculate_totalreserve_pages();
4571}
4572
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
4581{
4582 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4583 unsigned long lowmem_pages = 0;
4584 struct zone *zone;
4585 unsigned long flags;
4586
	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
4589 if (!is_highmem(zone))
4590 lowmem_pages += zone->present_pages;
4591 }
4592
4593 for_each_zone(zone) {
4594 u64 tmp;
4595
4596 spin_lock_irqsave(&zone->lock, flags);
4597 tmp = (u64)pages_min * zone->present_pages;
4598 do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}
4624
4625 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
4626 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4627 setup_zone_migrate_reserve(zone);
4628 spin_unlock_irqrestore(&zone->lock, flags);
4629 }
4630
4631
4632 calculate_totalreserve_pages();
4633}
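
/*
 * Worked example for setup_per_zone_wmarks() (illustrative numbers):
 * with min_free_kbytes = 1024 and 4KiB pages, pages_min = 1024 >> 2 =
 * 256.  A lowmem zone owning half of all lowmem gets tmp = 128, hence
 * WMARK_MIN = 128, WMARK_LOW = 128 + 32 = 160 and WMARK_HIGH =
 * 128 + 64 = 192 pages.
 */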
4634
/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
void calculate_zone_inactive_ratio(struct zone *zone)
4657{
4658 unsigned int gb, ratio;
4659
4660
4661 gb = zone->present_pages >> (30 - PAGE_SHIFT);
4662 if (gb)
4663 ratio = int_sqrt(10 * gb);
4664 else
4665 ratio = 1;
4666
4667 zone->inactive_ratio = ratio;
4668}
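
/*
 * Worked example for calculate_zone_inactive_ratio() (assumed 4KiB
 * pages): a 1GiB zone has gb = 1 and inactive_ratio = int_sqrt(10) = 3;
 * a 16GiB zone has gb = 16 and inactive_ratio = int_sqrt(160) = 12.
 */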
4669
4670static void __init setup_per_zone_inactive_ratio(void)
4671{
4672 struct zone *zone;
4673
4674 for_each_zone(zone)
4675 calculate_zone_inactive_ratio(zone);
4676}
4677
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_wmark_min(void)
4703{
4704 unsigned long lowmem_kbytes;
4705
4706 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4707
4708 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4709 if (min_free_kbytes < 128)
4710 min_free_kbytes = 128;
4711 if (min_free_kbytes > 65536)
4712 min_free_kbytes = 65536;
4713 setup_per_zone_wmarks();
4714 setup_per_zone_lowmem_reserve();
4715 setup_per_zone_inactive_ratio();
4716 return 0;
4717}
4718module_init(init_per_zone_wmark_min)
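
/*
 * Worked example for init_per_zone_wmark_min() (illustrative numbers):
 * with roughly 4GiB of lowmem, lowmem_kbytes is about 4194304, so
 * min_free_kbytes = int_sqrt(4194304 * 16) = 8192, i.e. 8MiB, which is
 * within the [128, 65536] clamp.
 */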
4719
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4726 void __user *buffer, size_t *length, loff_t *ppos)
4727{
4728 proc_dointvec(table, write, buffer, length, ppos);
4729 if (write)
4730 setup_per_zone_wmarks();
4731 return 0;
4732}
4733
4734#ifdef CONFIG_NUMA
4735int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4736 void __user *buffer, size_t *length, loff_t *ppos)
4737{
4738 struct zone *zone;
4739 int rc;
4740
4741 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4742 if (rc)
4743 return rc;
4744
4745 for_each_zone(zone)
4746 zone->min_unmapped_pages = (zone->present_pages *
4747 sysctl_min_unmapped_ratio) / 100;
4748 return 0;
4749}
4750
4751int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4752 void __user *buffer, size_t *length, loff_t *ppos)
4753{
4754 struct zone *zone;
4755 int rc;
4756
4757 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4758 if (rc)
4759 return rc;
4760
4761 for_each_zone(zone)
4762 zone->min_slab_pages = (zone->present_pages *
4763 sysctl_min_slab_ratio) / 100;
4764 return 0;
4765}
4766#endif
4767
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio can only make sense
 * in function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4778 void __user *buffer, size_t *length, loff_t *ppos)
4779{
4780 proc_dointvec_minmax(table, write, buffer, length, ppos);
4781 setup_per_zone_lowmem_reserve();
4782 return 0;
4783}
4784
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4792 void __user *buffer, size_t *length, loff_t *ppos)
4793{
4794 struct zone *zone;
4795 unsigned int cpu;
4796 int ret;
4797
4798 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4799 if (!write || (ret == -EINVAL))
4800 return ret;
4801 for_each_populated_zone(zone) {
4802 for_each_online_cpu(cpu) {
4803 unsigned long high;
4804 high = zone->present_pages / percpu_pagelist_fraction;
4805 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4806 }
4807 }
4808 return 0;
4809}
4810
4811int hashdist = HASHDIST_DEFAULT;
4812
4813#ifdef CONFIG_NUMA
4814static int __init set_hashdist(char *str)
4815{
4816 if (!str)
4817 return 0;
4818 hashdist = simple_strtoul(str, &str, 0);
4819 return 1;
4820}
4821__setup("hashdist=", set_hashdist);
4822#endif
4823
/*
 * Allocate a large system hash table from bootmem.
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
4831 unsigned long bucketsize,
4832 unsigned long numentries,
4833 int scale,
4834 int flags,
4835 unsigned int *_hash_shift,
4836 unsigned int *_hash_mask,
4837 unsigned long limit)
4838{
4839 unsigned long long max = limit;
4840 unsigned long log2qty, size;
4841 void *table = NULL;
4842
	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
4867 }
4868 numentries = roundup_pow_of_two(numentries);
4869
	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
4872 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4873 do_div(max, bucketsize);
4874 }
4875
4876 if (numentries > max)
4877 numentries = max;
4878
4879 log2qty = ilog2(numentries);
4880
4881 do {
4882 size = bucketsize << log2qty;
4883 if (flags & HASH_EARLY)
4884 table = alloc_bootmem_nopanic(size);
4885 else if (hashdist)
4886 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power of two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() automatically does.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
4898 } while (!table && size > PAGE_SIZE && --log2qty);
4899
4900 if (!table)
4901 panic("Failed to allocate %s hash table\n", tablename);
4902
4903 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4904 tablename,
4905 (1U << log2qty),
4906 ilog2(size) - PAGE_SHIFT,
4907 size);
4908
4909 if (_hash_shift)
4910 *_hash_shift = log2qty;
4911 if (_hash_mask)
4912 *_hash_mask = (1 << log2qty) - 1;
4913
4914 return table;
4915}
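
/*
 * Usage sketch for alloc_large_system_hash() (mirrors how callers such as
 * the inode cache size their tables; the identifiers below are assumed
 * for illustration, not taken from this file):
 *
 *	static struct hlist_head *inode_hashtable;
 *	static unsigned int i_hash_shift, i_hash_mask;
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14, HASH_EARLY,
 *					&i_hash_shift, &i_hash_mask, 0);
 *
 * With numentries left at 0 the table is scaled from nr_kernel_pages,
 * here one bucket per 2^14 bytes of low memory.
 */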
4916
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
4920{
4921#ifdef CONFIG_SPARSEMEM
4922 return __pfn_to_section(pfn)->pageblock_flags;
4923#else
4924 return zone->pageblock_flags;
4925#endif
4926}
4927
4928static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4929{
4930#ifdef CONFIG_SPARSEMEM
4931 pfn &= (PAGES_PER_SECTION-1);
4932 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4933#else
4934 pfn = pfn - zone->zone_start_pfn;
4935 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4936#endif
4937}
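
/*
 * Worked example for pfn_to_bitidx() (assumed non-SPARSEMEM layout,
 * pageblock_order == 10 and NR_PAGEBLOCK_BITS == 3): a pfn 3072 pages
 * into the zone lies in pageblock 3, so its flags start at bit index 9
 * of the zone's pageblock bitmap.
 */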
4938
/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
4948{
4949 struct zone *zone;
4950 unsigned long *bitmap;
4951 unsigned long pfn, bitidx;
4952 unsigned long flags = 0;
4953 unsigned long value = 1;
4954
4955 zone = page_zone(page);
4956 pfn = page_to_pfn(page);
4957 bitmap = get_pageblock_bitmap(zone, pfn);
4958 bitidx = pfn_to_bitidx(zone, pfn);
4959
4960 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4961 if (test_bit(bitidx + start_bitidx, bitmap))
4962 flags |= value;
4963
4964 return flags;
4965}
4966
/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
4976{
4977 struct zone *zone;
4978 unsigned long *bitmap;
4979 unsigned long pfn, bitidx;
4980 unsigned long value = 1;
4981
4982 zone = page_zone(page);
4983 pfn = page_to_pfn(page);
4984 bitmap = get_pageblock_bitmap(zone, pfn);
4985 bitidx = pfn_to_bitidx(zone, pfn);
4986 VM_BUG_ON(pfn < zone->zone_start_pfn);
4987 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4988
4989 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4990 if (flags & value)
4991 __set_bit(bitidx + start_bitidx, bitmap);
4992 else
4993 __clear_bit(bitidx + start_bitidx, bitmap);
4994}
4995
/*
 * Set the pageblock's migratetype to MIGRATE_ISOLATE and move its free
 * pages to the MIGRATE_ISOLATE free list; see also mm/page_isolation.c.
 */
int set_migratetype_isolate(struct page *page)
5003{
5004 struct zone *zone;
5005 unsigned long flags;
5006 int ret = -EBUSY;
5007 int zone_idx;
5008
5009 zone = page_zone(page);
5010 zone_idx = zone_idx(zone);
5011 spin_lock_irqsave(&zone->lock, flags);

	/*
	 * In the future, more migrate types will be able to be
	 * isolation targets.
	 */
	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
5016 zone_idx != ZONE_MOVABLE)
5017 goto out;
5018 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5019 move_freepages_block(zone, page, MIGRATE_ISOLATE);
5020 ret = 0;
5021out:
5022 spin_unlock_irqrestore(&zone->lock, flags);
5023 if (!ret)
5024 drain_all_pages();
5025 return ret;
5026}
5027
5028void unset_migratetype_isolate(struct page *page)
5029{
5030 struct zone *zone;
5031 unsigned long flags;
5032 zone = page_zone(page);
5033 spin_lock_irqsave(&zone->lock, flags);
5034 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5035 goto out;
5036 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5037 move_freepages_block(zone, page, MIGRATE_MOVABLE);
5038out:
5039 spin_unlock_irqrestore(&zone->lock, flags);
5040}
5041
5042#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
5047__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5048{
5049 struct page *page;
5050 struct zone *zone;
5051 int order, i;
5052 unsigned long pfn;
5053 unsigned long flags;
5054
5055 for (pfn = start_pfn; pfn < end_pfn; pfn++)
5056 if (pfn_valid(pfn))
5057 break;
5058 if (pfn == end_pfn)
5059 return;
5060 zone = page_zone(pfn_to_page(pfn));
5061 spin_lock_irqsave(&zone->lock, flags);
5062 pfn = start_pfn;
5063 while (pfn < end_pfn) {
5064 if (!pfn_valid(pfn)) {
5065 pfn++;
5066 continue;
5067 }
5068 page = pfn_to_page(pfn);
5069 BUG_ON(page_count(page));
5070 BUG_ON(!PageBuddy(page));
5071 order = page_order(page);
5072#ifdef CONFIG_DEBUG_VM
5073 printk(KERN_INFO "remove from free list %lx %d %lx\n",
5074 pfn, 1 << order, end_pfn);
5075#endif
5076 list_del(&page->lru);
5077 rmv_page_order(page);
5078 zone->free_area[order].nr_free--;
5079 __mod_zone_page_state(zone, NR_FREE_PAGES,
5080 - (1UL << order));
5081 for (i = 0; i < (1 << order); i++)
5082 SetPageReserved((page+i));
5083 pfn += (1 << order);
5084 }
5085 spin_unlock_irqrestore(&zone->lock, flags);
5086}
5087#endif
5088