/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ivan Kokshaysky)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended.  To avoid races with
 * the suspend/hibernate code, they should always be called with pm_mutex
 * held (gfp_allowed_mask also should only be modified with pm_mutex held,
 * unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */
static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * sysctl_lowmem_reserve_ratio controls how aggressively the kernel defends
 * a lower zone (e.g. ZONE_DMA, ZONE_NORMAL) against allocations that could
 * equally well have been satisfied from a higher zone.  Each lower zone
 * reserves roughly (pages of the higher zones) / ratio pages, so a larger
 * ratio means a smaller reservation.
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges
   * of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible so
   * the number of times add_active_range() can be called is related to
   * the number of nodes and the number of holes.
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{

	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

235#ifdef CONFIG_DEBUG_VM
236static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
237{
238 int ret = 0;
239 unsigned seq;
240 unsigned long pfn = page_to_pfn(page);
241
242 do {
243 seq = zone_span_seqbegin(zone);
244 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
245 ret = 1;
246 else if (pfn < zone->zone_start_pfn)
247 ret = 1;
248 } while (zone_span_seqretry(zone, seq));
249
250 return ret;
251}
252
253static int page_is_consistent(struct zone *zone, struct page *page)
254{
255 if (!pfn_valid_within(page_to_pfn(page)))
256 return 0;
257 if (zone != page_zone(page))
258 return 0;
259
260 return 1;
261}
262
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
265static int bad_range(struct zone *zone, struct page *page)
266{
267 if (page_outside_zone_boundaries(zone, page))
268 return 1;
269 if (!page_is_consistent(zone, page))
270 return 1;
271
272 return 0;
273}
274#else
275static inline int bad_range(struct zone *zone, struct page *page)
276{
277 return 0;
278}
279#endif
280
281static void bad_page(struct page *page)
282{
283 static unsigned long resume;
284 static unsigned long nr_shown;
285 static unsigned long nr_unshown;
	/* Don't complain about poisoned pages */
288 if (PageHWPoison(page)) {
289 __ClearPageBuddy(page);
290 return;
291 }
	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
297 if (nr_shown == 60) {
298 if (time_before(jiffies, resume)) {
299 nr_unshown++;
300 goto out;
301 }
302 if (nr_unshown) {
303 printk(KERN_ALERT
304 "BUG: Bad page state: %lu messages suppressed\n",
305 nr_unshown);
306 nr_unshown = 0;
307 }
308 nr_shown = 0;
309 }
310 if (nr_shown++ == 0)
311 resume = jiffies + 60 * HZ;
312
313 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
314 current->comm, page_to_pfn(page));
315 dump_page(page);
316
317 dump_stack();
318out:
319
320 __ClearPageBuddy(page);
321 add_taint(TAINT_BAD_PAGE);
322}
323
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
339static void free_compound_page(struct page *page)
340{
341 __free_pages_ok(page, compound_order(page));
342}
343
344void prep_compound_page(struct page *page, unsigned long order)
345{
346 int i;
347 int nr_pages = 1 << order;
348
349 set_compound_page_dtor(page, free_compound_page);
350 set_compound_order(page, order);
351 __SetPageHead(page);
352 for (i = 1; i < nr_pages; i++) {
353 struct page *p = page + i;
354
355 __SetPageTail(p);
356 p->first_page = page;
357 }
358}
359
360
361static int destroy_compound_page(struct page *page, unsigned long order)
362{
363 int i;
364 int nr_pages = 1 << order;
365 int bad = 0;
366
367 if (unlikely(compound_order(page) != order) ||
368 unlikely(!PageHead(page))) {
369 bad_page(page);
370 bad++;
371 }
372
373 __ClearPageHead(page);
374
375 for (i = 1; i < nr_pages; i++) {
376 struct page *p = page + i;
377
378 if (unlikely(!PageTail(p) || (p->first_page != page))) {
379 bad_page(page);
380 bad++;
381 }
382 __ClearPageTail(p);
383 }
384
385 return bad;
386}
387
388static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
389{
390 int i;
391
392
393
394
395
396 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
397 for (i = 0; i < (1 << order); i++)
398 clear_highpage(page + i);
399}
400
401static inline void set_page_order(struct page *page, int order)
402{
403 set_page_private(page, order);
404 __SetPageBuddy(page);
405}
406
407static inline void rmv_page_order(struct page *page)
408{
409 __ClearPageBuddy(page);
410 set_page_private(page, 0);
411}
412
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
429
430static inline unsigned long
431__find_buddy_index(unsigned long page_idx, unsigned int order)
432{
433 return page_idx ^ (1 << order);
434}
435
/*
 * This function checks whether a page is free && is the buddy
 * we can do coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
449static inline int page_is_buddy(struct page *page, struct page *buddy,
450 int order)
451{
452 if (!pfn_valid_within(page_to_pfn(buddy)))
453 return 0;
454
455 if (page_zone_id(page) != page_zone_id(buddy))
456 return 0;
457
458 if (PageBuddy(buddy) && page_order(buddy) == order) {
459 VM_BUG_ON(page_count(buddy) != 0);
460 return 1;
461 }
462 return 0;
463}
464
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of
 * the other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 */
489static inline void __free_one_page(struct page *page,
490 struct zone *zone, unsigned int order,
491 int migratetype)
492{
493 unsigned long page_idx;
494 unsigned long combined_idx;
495 unsigned long uninitialized_var(buddy_idx);
496 struct page *buddy;
497
498 if (unlikely(PageCompound(page)))
499 if (unlikely(destroy_compound_page(page, order)))
500 return;
501
502 VM_BUG_ON(migratetype == -1);
503
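	/*
	 * Only the page's offset within its MAX_ORDER-aligned block matters
	 * for the buddy index arithmetic below, so mask off the higher
	 * pfn bits here.
	 */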
504 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
505
506 VM_BUG_ON(page_idx & ((1 << order) - 1));
507 VM_BUG_ON(bad_range(zone, page));
508
509 while (order < MAX_ORDER-1) {
510 buddy_idx = __find_buddy_index(page_idx, order);
511 buddy = page + (buddy_idx - page_idx);
512 if (!page_is_buddy(page, buddy, order))
513 break;
514
515
516 list_del(&buddy->lru);
517 zone->free_area[order].nr_free--;
518 rmv_page_order(buddy);
519 combined_idx = buddy_idx & page_idx;
520 page = page + (combined_idx - page_idx);
521 page_idx = combined_idx;
522 order++;
523 }
524 set_page_order(page, order);
525
	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case,
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
534 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
535 struct page *higher_page, *higher_buddy;
536 combined_idx = buddy_idx & page_idx;
537 higher_page = page + (combined_idx - page_idx);
538 buddy_idx = __find_buddy_index(combined_idx, order + 1);
539 higher_buddy = page + (buddy_idx - combined_idx);
540 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
541 list_add_tail(&page->lru,
542 &zone->free_area[order].free_list[migratetype]);
543 goto out;
544 }
545 }
546
547 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
548out:
549 zone->free_area[order].nr_free++;
550}
551
/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * Page should not be on lru, so no need to fix that up.
 */
557static inline void free_page_mlock(struct page *page)
558{
559 __dec_zone_page_state(page, NR_MLOCK);
560 __count_vm_event(UNEVICTABLE_MLOCKFREED);
561}
562
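/*
 * Sanity-check a page that is about to be freed: it must not be mapped,
 * must not belong to a mapping, must have a zero reference count and must
 * not carry any of the PAGE_FLAGS_CHECK_AT_FREE flags.  Returns 1 (and
 * reports a bad page) otherwise.
 */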
563static inline int free_pages_check(struct page *page)
564{
565 if (unlikely(page_mapcount(page) |
566 (page->mapping != NULL) |
567 (atomic_read(&page->_count) != 0) |
568 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
569 bad_page(page);
570 return 1;
571 }
572 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
573 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
574 return 0;
575}
576
/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
588static void free_pcppages_bulk(struct zone *zone, int count,
589 struct per_cpu_pages *pcp)
590{
591 int migratetype = 0;
592 int batch_free = 0;
593 int to_free = count;
594
595 spin_lock(&zone->lock);
596 zone->all_unreclaimable = 0;
597 zone->pages_scanned = 0;
598
599 while (to_free) {
600 struct page *page;
601 struct list_head *list;
602
		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
610 do {
611 batch_free++;
612 if (++migratetype == MIGRATE_PCPTYPES)
613 migratetype = 0;
614 list = &pcp->lists[migratetype];
615 } while (list_empty(list));
616
617 do {
618 page = list_entry(list->prev, struct page, lru);
619
620 list_del(&page->lru);
621
622 __free_one_page(page, zone, 0, page_private(page));
623 trace_mm_page_pcpu_drain(page, 0, page_private(page));
624 } while (--to_free && --batch_free && !list_empty(list));
625 }
626 __mod_zone_page_state(zone, NR_FREE_PAGES, count);
627 spin_unlock(&zone->lock);
628}
629
630static void free_one_page(struct zone *zone, struct page *page, int order,
631 int migratetype)
632{
633 spin_lock(&zone->lock);
634 zone->all_unreclaimable = 0;
635 zone->pages_scanned = 0;
636
637 __free_one_page(page, zone, order, migratetype);
638 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
639 spin_unlock(&zone->lock);
640}
641
642static bool free_pages_prepare(struct page *page, unsigned int order)
643{
644 int i;
645 int bad = 0;
646
647 trace_mm_page_free_direct(page, order);
648 kmemcheck_free_shadow(page, order);
649
650 if (PageAnon(page))
651 page->mapping = NULL;
652 for (i = 0; i < (1 << order); i++)
653 bad += free_pages_check(page + i);
654 if (bad)
655 return false;
656
657 if (!PageHighMem(page)) {
658 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
659 debug_check_no_obj_freed(page_address(page),
660 PAGE_SIZE << order);
661 }
662 arch_free_page(page, order);
663 kernel_map_pages(page, 1 << order, 0);
664
665 return true;
666}
667
668static void __free_pages_ok(struct page *page, unsigned int order)
669{
670 unsigned long flags;
671 int wasMlocked = __TestClearPageMlocked(page);
672
673 if (!free_pages_prepare(page, order))
674 return;
675
676 local_irq_save(flags);
677 if (unlikely(wasMlocked))
678 free_page_mlock(page);
679 __count_vm_events(PGFREE, 1 << order);
680 free_one_page(page_zone(page), page, order,
681 get_pageblock_migratetype(page));
682 local_irq_restore(flags);
683}
684
685
686
687
688void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
689{
690 if (order == 0) {
691 __ClearPageReserved(page);
692 set_page_count(page, 0);
693 set_page_refcounted(page);
694 __free_page(page);
695 } else {
696 int loop;
697
698 prefetchw(page);
699 for (loop = 0; loop < BITS_PER_LONG; loop++) {
700 struct page *p = &page[loop];
701
702 if (loop + 1 < BITS_PER_LONG)
703 prefetchw(p + 1);
704 __ClearPageReserved(p);
705 set_page_count(p, 0);
706 }
707
708 set_page_refcounted(page);
709 __free_pages(page, order);
710 }
711}
712
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 */
728static inline void expand(struct zone *zone, struct page *page,
729 int low, int high, struct free_area *area,
730 int migratetype)
731{
732 unsigned long size = 1 << high;
733
734 while (high > low) {
735 area--;
736 high--;
737 size >>= 1;
738 VM_BUG_ON(bad_range(zone, &page[size]));
739 list_add(&page[size].lru, &area->free_list[migratetype]);
740 area->nr_free++;
741 set_page_order(&page[size], high);
742 }
743}
744
/*
 * This page is about to be returned from the page allocator
 */
748static inline int check_new_page(struct page *page)
749{
750 if (unlikely(page_mapcount(page) |
751 (page->mapping != NULL) |
752 (atomic_read(&page->_count) != 0) |
753 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
754 bad_page(page);
755 return 1;
756 }
757 return 0;
758}
759
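/*
 * Final preparation of a freshly allocated page: re-check every sub-page,
 * reset the private field and reference count, optionally zero the pages
 * and set up a compound page for __GFP_COMP requests.  Returns non-zero if
 * a bad page was found.
 */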
760static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
761{
762 int i;
763
764 for (i = 0; i < (1 << order); i++) {
765 struct page *p = page + i;
766 if (unlikely(check_new_page(p)))
767 return 1;
768 }
769
770 set_page_private(page, 0);
771 set_page_refcounted(page);
772
773 arch_alloc_page(page, order);
774 kernel_map_pages(page, 1 << order, 1);
775
776 if (gfp_flags & __GFP_ZERO)
777 prep_zero_page(page, order, gfp_flags);
778
779 if (order && (gfp_flags & __GFP_COMP))
780 prep_compound_page(page, order);
781
782 return 0;
783}
784
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
789static inline
790struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
791 int migratetype)
792{
793 unsigned int current_order;
794 struct free_area * area;
795 struct page *page;
796
797
798 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
799 area = &(zone->free_area[current_order]);
800 if (list_empty(&area->free_list[migratetype]))
801 continue;
802
803 page = list_entry(area->free_list[migratetype].next,
804 struct page, lru);
805 list_del(&page->lru);
806 rmv_page_order(page);
807 area->nr_free--;
808 expand(zone, page, order, current_order, area, migratetype);
809 return page;
810 }
811
812 return NULL;
813}
814
/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
820static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
821 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
822 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
823 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
824 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE },
825};
826
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
832static int move_freepages(struct zone *zone,
833 struct page *start_page, struct page *end_page,
834 int migratetype)
835{
836 struct page *page;
837 unsigned long order;
838 int pages_moved = 0;
839
840#ifndef CONFIG_HOLES_IN_ZONE
841
842
843
844
845
846
847
848 BUG_ON(page_zone(start_page) != page_zone(end_page));
849#endif
850
851 for (page = start_page; page <= end_page;) {
852
853 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
854
855 if (!pfn_valid_within(page_to_pfn(page))) {
856 page++;
857 continue;
858 }
859
860 if (!PageBuddy(page)) {
861 page++;
862 continue;
863 }
864
865 order = page_order(page);
866 list_del(&page->lru);
867 list_add(&page->lru,
868 &zone->free_area[order].free_list[migratetype]);
869 page += 1 << order;
870 pages_moved += 1 << order;
871 }
872
873 return pages_moved;
874}
875
876static int move_freepages_block(struct zone *zone, struct page *page,
877 int migratetype)
878{
879 unsigned long start_pfn, end_pfn;
880 struct page *start_page, *end_page;
881
882 start_pfn = page_to_pfn(page);
883 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
884 start_page = pfn_to_page(start_pfn);
885 end_page = start_page + pageblock_nr_pages - 1;
886 end_pfn = start_pfn + pageblock_nr_pages - 1;
887
888
889 if (start_pfn < zone->zone_start_pfn)
890 start_page = page;
891 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
892 return 0;
893
894 return move_freepages(zone, start_page, end_page, migratetype);
895}
896
897static void change_pageblock_range(struct page *pageblock_page,
898 int start_order, int migratetype)
899{
900 int nr_pageblocks = 1 << (start_order - pageblock_order);
901
902 while (nr_pageblocks--) {
903 set_pageblock_migratetype(pageblock_page, migratetype);
904 pageblock_page += pageblock_nr_pages;
905 }
906}
907
/* Remove an element from the buddy allocator from the fallback list */
909static inline struct page *
910__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
911{
912 struct free_area * area;
913 int current_order;
914 struct page *page;
915 int migratetype, i;
916
917
918 for (current_order = MAX_ORDER-1; current_order >= order;
919 --current_order) {
920 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
921 migratetype = fallbacks[start_migratetype][i];
922
923
924 if (migratetype == MIGRATE_RESERVE)
925 continue;
926
927 area = &(zone->free_area[current_order]);
928 if (list_empty(&area->free_list[migratetype]))
929 continue;
930
931 page = list_entry(area->free_list[migratetype].next,
932 struct page, lru);
933 area->nr_free--;
934
			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages.
			 */
941 if (unlikely(current_order >= (pageblock_order >> 1)) ||
942 start_migratetype == MIGRATE_RECLAIMABLE ||
943 page_group_by_mobility_disabled) {
944 unsigned long pages;
945 pages = move_freepages_block(zone, page,
946 start_migratetype);
947
948
949 if (pages >= (1 << (pageblock_order-1)) ||
950 page_group_by_mobility_disabled)
951 set_pageblock_migratetype(page,
952 start_migratetype);
953
954 migratetype = start_migratetype;
955 }
956
957
958 list_del(&page->lru);
959 rmv_page_order(page);
960
961
962 if (current_order >= pageblock_order)
963 change_pageblock_range(page, current_order,
964 start_migratetype);
965
966 expand(zone, page, order, current_order, area, migratetype);
967
968 trace_mm_page_alloc_extfrag(page, order, current_order,
969 start_migratetype, migratetype);
970
971 return page;
972 }
973 }
974
975 return NULL;
976}
977
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
982static struct page *__rmqueue(struct zone *zone, unsigned int order,
983 int migratetype)
984{
985 struct page *page;
986
987retry_reserve:
988 page = __rmqueue_smallest(zone, order, migratetype);
989
990 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
991 page = __rmqueue_fallback(zone, order, migratetype);
992
993
994
995
996
997
998 if (!page) {
999 migratetype = MIGRATE_RESERVE;
1000 goto retry_reserve;
1001 }
1002 }
1003
1004 trace_mm_page_alloc_zone_locked(page, order, migratetype);
1005 return page;
1006}
1007
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
1013static int rmqueue_bulk(struct zone *zone, unsigned int order,
1014 unsigned long count, struct list_head *list,
1015 int migratetype, int cold)
1016{
1017 int i;
1018
1019 spin_lock(&zone->lock);
1020 for (i = 0; i < count; ++i) {
1021 struct page *page = __rmqueue(zone, order, migratetype);
1022 if (unlikely(page == NULL))
1023 break;
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034 if (likely(cold == 0))
1035 list_add(&page->lru, list);
1036 else
1037 list_add_tail(&page->lru, list);
1038 set_page_private(page, migratetype);
1039 list = &page->lru;
1040 }
1041 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1042 spin_unlock(&zone->lock);
1043 return i;
1044}
1045
1046#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
1055void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1056{
1057 unsigned long flags;
1058 int to_drain;
1059
1060 local_irq_save(flags);
1061 if (pcp->count >= pcp->batch)
1062 to_drain = pcp->batch;
1063 else
1064 to_drain = pcp->count;
1065 free_pcppages_bulk(zone, to_drain, pcp);
1066 pcp->count -= to_drain;
1067 local_irq_restore(flags);
1068}
1069#endif
1070
/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor, or a processor that
 * is not online.
 */
1078static void drain_pages(unsigned int cpu)
1079{
1080 unsigned long flags;
1081 struct zone *zone;
1082
1083 for_each_populated_zone(zone) {
1084 struct per_cpu_pageset *pset;
1085 struct per_cpu_pages *pcp;
1086
1087 local_irq_save(flags);
1088 pset = per_cpu_ptr(zone->pageset, cpu);
1089
1090 pcp = &pset->pcp;
1091 if (pcp->count) {
1092 free_pcppages_bulk(zone, pcp->count, pcp);
1093 pcp->count = 0;
1094 }
1095 local_irq_restore(flags);
1096 }
1097}
1098
1099
1100
1101
1102void drain_local_pages(void *arg)
1103{
1104 drain_pages(smp_processor_id());
1105}
1106
1107
1108
1109
1110void drain_all_pages(void)
1111{
1112 on_each_cpu(drain_local_pages, NULL, 1);
1113}
1114
1115#ifdef CONFIG_HIBERNATION
1116
1117void mark_free_pages(struct zone *zone)
1118{
1119 unsigned long pfn, max_zone_pfn;
1120 unsigned long flags;
1121 int order, t;
1122 struct list_head *curr;
1123
1124 if (!zone->spanned_pages)
1125 return;
1126
1127 spin_lock_irqsave(&zone->lock, flags);
1128
1129 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1130 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1131 if (pfn_valid(pfn)) {
1132 struct page *page = pfn_to_page(pfn);
1133
1134 if (!swsusp_page_is_forbidden(page))
1135 swsusp_unset_page_free(page);
1136 }
1137
1138 for_each_migratetype_order(order, t) {
1139 list_for_each(curr, &zone->free_area[order].free_list[t]) {
1140 unsigned long i;
1141
1142 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1143 for (i = 0; i < (1UL << order); i++)
1144 swsusp_set_page_free(pfn_to_page(pfn + i));
1145 }
1146 }
1147 spin_unlock_irqrestore(&zone->lock, flags);
1148}
1149#endif
1150
/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
1155void free_hot_cold_page(struct page *page, int cold)
1156{
1157 struct zone *zone = page_zone(page);
1158 struct per_cpu_pages *pcp;
1159 unsigned long flags;
1160 int migratetype;
1161 int wasMlocked = __TestClearPageMlocked(page);
1162
1163 if (!free_pages_prepare(page, 0))
1164 return;
1165
1166 migratetype = get_pageblock_migratetype(page);
1167 set_page_private(page, migratetype);
1168 local_irq_save(flags);
1169 if (unlikely(wasMlocked))
1170 free_page_mlock(page);
1171 __count_vm_event(PGFREE);
1172
1173
1174
1175
1176
1177
1178
1179
1180 if (migratetype >= MIGRATE_PCPTYPES) {
1181 if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1182 free_one_page(zone, page, 0, migratetype);
1183 goto out;
1184 }
1185 migratetype = MIGRATE_MOVABLE;
1186 }
1187
1188 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1189 if (cold)
1190 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1191 else
1192 list_add(&page->lru, &pcp->lists[migratetype]);
1193 pcp->count++;
1194 if (pcp->count >= pcp->high) {
1195 free_pcppages_bulk(zone, pcp->batch, pcp);
1196 pcp->count -= pcp->batch;
1197 }
1198
1199out:
1200 local_irq_restore(flags);
1201}
1202
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
1211void split_page(struct page *page, unsigned int order)
1212{
1213 int i;
1214
1215 VM_BUG_ON(PageCompound(page));
1216 VM_BUG_ON(!page_count(page));
1217
1218#ifdef CONFIG_KMEMCHECK
1219
1220
1221
1222
1223 if (kmemcheck_page_is_tracked(page))
1224 split_page(virt_to_page(page[0].shadow), order);
1225#endif
1226
1227 for (i = 1; i < (1 << order); i++)
1228 set_page_refcounted(page + i);
1229}
1230
/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
1241int split_free_page(struct page *page)
1242{
1243 unsigned int order;
1244 unsigned long watermark;
1245 struct zone *zone;
1246
1247 BUG_ON(!PageBuddy(page));
1248
1249 zone = page_zone(page);
1250 order = page_order(page);
1251
1252
1253 watermark = low_wmark_pages(zone) + (1 << order);
1254 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1255 return 0;
1256
1257
1258 list_del(&page->lru);
1259 zone->free_area[order].nr_free--;
1260 rmv_page_order(page);
1261 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1262
1263
1264 set_page_refcounted(page);
1265 split_page(page, order);
1266
1267 if (order >= pageblock_order - 1) {
1268 struct page *endpage = page + (1 << order) - 1;
1269 for (; page < endpage; page += pageblock_nr_pages)
1270 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1271 }
1272
1273 return 1 << order;
1274}
1275
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
1281static inline
1282struct page *buffered_rmqueue(struct zone *preferred_zone,
1283 struct zone *zone, int order, gfp_t gfp_flags,
1284 int migratetype)
1285{
1286 unsigned long flags;
1287 struct page *page;
1288 int cold = !!(gfp_flags & __GFP_COLD);
1289
1290again:
1291 if (likely(order == 0)) {
1292 struct per_cpu_pages *pcp;
1293 struct list_head *list;
1294
1295 local_irq_save(flags);
1296 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1297 list = &pcp->lists[migratetype];
1298 if (list_empty(list)) {
1299 pcp->count += rmqueue_bulk(zone, 0,
1300 pcp->batch, list,
1301 migratetype, cold);
1302 if (unlikely(list_empty(list)))
1303 goto failed;
1304 }
1305
1306 if (cold)
1307 page = list_entry(list->prev, struct page, lru);
1308 else
1309 page = list_entry(list->next, struct page, lru);
1310
1311 list_del(&page->lru);
1312 pcp->count--;
1313 } else {
1314 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325 WARN_ON_ONCE(order > 1);
1326 }
1327 spin_lock_irqsave(&zone->lock, flags);
1328 page = __rmqueue(zone, order, migratetype);
1329 spin_unlock(&zone->lock);
1330 if (!page)
1331 goto failed;
1332 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1333 }
1334
1335 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1336 zone_statistics(preferred_zone, zone);
1337 local_irq_restore(flags);
1338
1339 VM_BUG_ON(bad_range(zone, page));
1340 if (prep_new_page(page, order, gfp_flags))
1341 goto again;
1342 return page;
1343
1344failed:
1345 local_irq_restore(flags);
1346 return NULL;
1347}
1348
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1361
1362#ifdef CONFIG_FAIL_PAGE_ALLOC
1363
1364static struct fail_page_alloc_attr {
1365 struct fault_attr attr;
1366
1367 u32 ignore_gfp_highmem;
1368 u32 ignore_gfp_wait;
1369 u32 min_order;
1370
1371#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1372
1373 struct dentry *ignore_gfp_highmem_file;
1374 struct dentry *ignore_gfp_wait_file;
1375 struct dentry *min_order_file;
1376
1377#endif
1378
1379} fail_page_alloc = {
1380 .attr = FAULT_ATTR_INITIALIZER,
1381 .ignore_gfp_wait = 1,
1382 .ignore_gfp_highmem = 1,
1383 .min_order = 1,
1384};
1385
1386static int __init setup_fail_page_alloc(char *str)
1387{
1388 return setup_fault_attr(&fail_page_alloc.attr, str);
1389}
1390__setup("fail_page_alloc=", setup_fail_page_alloc);
1391
1392static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1393{
1394 if (order < fail_page_alloc.min_order)
1395 return 0;
1396 if (gfp_mask & __GFP_NOFAIL)
1397 return 0;
1398 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1399 return 0;
1400 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1401 return 0;
1402
1403 return should_fail(&fail_page_alloc.attr, 1 << order);
1404}
1405
1406#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1407
1408static int __init fail_page_alloc_debugfs(void)
1409{
1410 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1411 struct dentry *dir;
1412 int err;
1413
1414 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1415 "fail_page_alloc");
1416 if (err)
1417 return err;
1418 dir = fail_page_alloc.attr.dentries.dir;
1419
1420 fail_page_alloc.ignore_gfp_wait_file =
1421 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1422 &fail_page_alloc.ignore_gfp_wait);
1423
1424 fail_page_alloc.ignore_gfp_highmem_file =
1425 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1426 &fail_page_alloc.ignore_gfp_highmem);
1427 fail_page_alloc.min_order_file =
1428 debugfs_create_u32("min-order", mode, dir,
1429 &fail_page_alloc.min_order);
1430
1431 if (!fail_page_alloc.ignore_gfp_wait_file ||
1432 !fail_page_alloc.ignore_gfp_highmem_file ||
1433 !fail_page_alloc.min_order_file) {
1434 err = -ENOMEM;
1435 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1436 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1437 debugfs_remove(fail_page_alloc.min_order_file);
1438 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1439 }
1440
1441 return err;
1442}
1443
1444late_initcall(fail_page_alloc_debugfs);
1445
1446#endif
1447
1448#else
1449
1450static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1451{
1452 return 0;
1453}
1454
1455#endif
1456
/*
 * Return true if free pages are above 'mark'. This takes into account the
 * order of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags, long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	int o;
1467
1468 free_pages -= (1 << order) + 1;
1469 if (alloc_flags & ALLOC_HIGH)
1470 min -= min / 2;
1471 if (alloc_flags & ALLOC_HARDER)
1472 min -= min / 4;
1473
1474 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1475 return false;
1476 for (o = 0; o < order; o++) {
1477
1478 free_pages -= z->free_area[o].nr_free << o;
1479
1480
1481 min >>= 1;
1482
1483 if (free_pages <= min)
1484 return false;
1485 }
1486 return true;
1487}
1488
1489bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1490 int classzone_idx, int alloc_flags)
1491{
1492 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1493 zone_page_state(z, NR_FREE_PAGES));
1494}
1495
1496bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1497 int classzone_idx, int alloc_flags)
1498{
1499 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1500
1501 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1502 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1503
1504 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1505 free_pages);
1506}
1507
1508#ifdef CONFIG_NUMA
/*
 * zonelist_cache support: the zonelist caches which of its zones were
 * recently found to be full, plus a mapping from zonelist entry to node,
 * so that repeated allocation attempts can cheaply skip zones that are
 * unlikely to have free memory or that are not allowed by the current
 * cpuset.
 *
 * zlc_setup() returns the nodemask the caller may allocate from and zaps
 * the "full zones" bitmap about once per second, so that stale information
 * does not keep suppressing zones that have since freed pages.
 */
1531static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1532{
1533 struct zonelist_cache *zlc;
1534 nodemask_t *allowednodes;
1535
1536 zlc = zonelist->zlcache_ptr;
1537 if (!zlc)
1538 return NULL;
1539
1540 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1541 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1542 zlc->last_full_zap = jiffies;
1543 }
1544
1545 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1546 &cpuset_current_mems_allowed :
1547 &node_states[N_HIGH_MEMORY];
1548 return allowednodes;
1549}
1550
/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones bitmap).
 *  2) Check that the zone's node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if the zone is worth looking at further, or
 * false (zero) if it is not.
 */
1573static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1574 nodemask_t *allowednodes)
1575{
1576 struct zonelist_cache *zlc;
1577 int i;
1578 int n;
1579
1580 zlc = zonelist->zlcache_ptr;
1581 if (!zlc)
1582 return 1;
1583
1584 i = z - zonelist->_zonerefs;
1585 n = zlc->z_to_n[i];
1586
1587
1588 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1589}
1590
1591
1592
1593
1594
1595
1596static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1597{
1598 struct zonelist_cache *zlc;
1599 int i;
1600
1601 zlc = zonelist->zlcache_ptr;
1602 if (!zlc)
1603 return;
1604
1605 i = z - zonelist->_zonerefs;
1606
1607 set_bit(i, zlc->fullzones);
1608}
1609
1610#else
1611
1612static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1613{
1614 return NULL;
1615}
1616
1617static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1618 nodemask_t *allowednodes)
1619{
1620 return 1;
1621}
1622
1623static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1624{
1625}
1626#endif
1627
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
1632static struct page *
1633get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1634 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1635 struct zone *preferred_zone, int migratetype)
1636{
1637 struct zoneref *z;
1638 struct page *page = NULL;
1639 int classzone_idx;
1640 struct zone *zone;
1641 nodemask_t *allowednodes = NULL;
1642 int zlc_active = 0;
1643 int did_zlc_setup = 0;
1644
1645 classzone_idx = zone_idx(preferred_zone);
1646zonelist_scan:
1647
1648
1649
1650
1651 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1652 high_zoneidx, nodemask) {
1653 if (NUMA_BUILD && zlc_active &&
1654 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1655 continue;
1656 if ((alloc_flags & ALLOC_CPUSET) &&
1657 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1658 goto try_next_zone;
1659
1660 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1661 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1662 unsigned long mark;
1663 int ret;
1664
1665 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1666 if (zone_watermark_ok(zone, order, mark,
1667 classzone_idx, alloc_flags))
1668 goto try_this_zone;
1669
1670 if (zone_reclaim_mode == 0)
1671 goto this_zone_full;
1672
1673 ret = zone_reclaim(zone, gfp_mask, order);
1674 switch (ret) {
1675 case ZONE_RECLAIM_NOSCAN:
1676
1677 goto try_next_zone;
1678 case ZONE_RECLAIM_FULL:
1679
1680 goto this_zone_full;
1681 default:
1682
1683 if (!zone_watermark_ok(zone, order, mark,
1684 classzone_idx, alloc_flags))
1685 goto this_zone_full;
1686 }
1687 }
1688
1689try_this_zone:
1690 page = buffered_rmqueue(preferred_zone, zone, order,
1691 gfp_mask, migratetype);
1692 if (page)
1693 break;
1694this_zone_full:
1695 if (NUMA_BUILD)
1696 zlc_mark_zone_full(zonelist, z);
1697try_next_zone:
1698 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1699
1700
1701
1702
1703 allowednodes = zlc_setup(zonelist, alloc_flags);
1704 zlc_active = 1;
1705 did_zlc_setup = 1;
1706 }
1707 }
1708
1709 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1710
1711 zlc_active = 0;
1712 goto zonelist_scan;
1713 }
1714 return page;
1715}
1716
1717static inline int
1718should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1719 unsigned long pages_reclaimed)
1720{
1721
1722 if (gfp_mask & __GFP_NORETRY)
1723 return 0;
1724
1725
1726
1727
1728
1729
1730 if (order <= PAGE_ALLOC_COSTLY_ORDER)
1731 return 1;
1732
1733
1734
1735
1736
1737
1738
1739
1740 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1741 return 1;
1742
1743
1744
1745
1746
1747 if (gfp_mask & __GFP_NOFAIL)
1748 return 1;
1749
1750 return 0;
1751}
1752
1753static inline struct page *
1754__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1755 struct zonelist *zonelist, enum zone_type high_zoneidx,
1756 nodemask_t *nodemask, struct zone *preferred_zone,
1757 int migratetype)
1758{
1759 struct page *page;
1760
1761
1762 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
1763 schedule_timeout_uninterruptible(1);
1764 return NULL;
1765 }
1766
1767
1768
1769
1770
1771
1772 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1773 order, zonelist, high_zoneidx,
1774 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1775 preferred_zone, migratetype);
1776 if (page)
1777 goto out;
1778
1779 if (!(gfp_mask & __GFP_NOFAIL)) {
1780
1781 if (order > PAGE_ALLOC_COSTLY_ORDER)
1782 goto out;
1783
1784 if (high_zoneidx < ZONE_NORMAL)
1785 goto out;
1786
1787
1788
1789
1790
1791
1792
1793 if (gfp_mask & __GFP_THISNODE)
1794 goto out;
1795 }
1796
1797 out_of_memory(zonelist, gfp_mask, order, nodemask);
1798
1799out:
1800 clear_zonelist_oom(zonelist, gfp_mask);
1801 return page;
1802}
1803
1804#ifdef CONFIG_COMPACTION
1805
1806static struct page *
1807__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1808 struct zonelist *zonelist, enum zone_type high_zoneidx,
1809 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1810 int migratetype, unsigned long *did_some_progress,
1811 bool sync_migration)
1812{
1813 struct page *page;
1814
1815 if (!order || compaction_deferred(preferred_zone))
1816 return NULL;
1817
1818 current->flags |= PF_MEMALLOC;
1819 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1820 nodemask, sync_migration);
1821 current->flags &= ~PF_MEMALLOC;
1822 if (*did_some_progress != COMPACT_SKIPPED) {
1823
1824
1825 drain_pages(get_cpu());
1826 put_cpu();
1827
1828 page = get_page_from_freelist(gfp_mask, nodemask,
1829 order, zonelist, high_zoneidx,
1830 alloc_flags, preferred_zone,
1831 migratetype);
1832 if (page) {
1833 preferred_zone->compact_considered = 0;
1834 preferred_zone->compact_defer_shift = 0;
1835 count_vm_event(COMPACTSUCCESS);
1836 return page;
1837 }
1838
1839
1840
1841
1842
1843
1844 count_vm_event(COMPACTFAIL);
1845 defer_compaction(preferred_zone);
1846
1847 cond_resched();
1848 }
1849
1850 return NULL;
1851}
1852#else
1853static inline struct page *
1854__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1855 struct zonelist *zonelist, enum zone_type high_zoneidx,
1856 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1857 int migratetype, unsigned long *did_some_progress,
1858 bool sync_migration)
1859{
1860 return NULL;
1861}
1862#endif
1863
1864
1865static inline struct page *
1866__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1867 struct zonelist *zonelist, enum zone_type high_zoneidx,
1868 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1869 int migratetype, unsigned long *did_some_progress)
1870{
1871 struct page *page = NULL;
1872 struct reclaim_state reclaim_state;
1873 bool drained = false;
1874
1875 cond_resched();
1876
1877
1878 cpuset_memory_pressure_bump();
1879 current->flags |= PF_MEMALLOC;
1880 lockdep_set_current_reclaim_state(gfp_mask);
1881 reclaim_state.reclaimed_slab = 0;
1882 current->reclaim_state = &reclaim_state;
1883
1884 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1885
1886 current->reclaim_state = NULL;
1887 lockdep_clear_current_reclaim_state();
1888 current->flags &= ~PF_MEMALLOC;
1889
1890 cond_resched();
1891
1892 if (unlikely(!(*did_some_progress)))
1893 return NULL;
1894
1895retry:
1896 page = get_page_from_freelist(gfp_mask, nodemask, order,
1897 zonelist, high_zoneidx,
1898 alloc_flags, preferred_zone,
1899 migratetype);
1900
1901
1902
1903
1904
1905 if (!page && !drained) {
1906 drain_all_pages();
1907 drained = true;
1908 goto retry;
1909 }
1910
1911 return page;
1912}
1913
1914
1915
1916
1917
1918static inline struct page *
1919__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1920 struct zonelist *zonelist, enum zone_type high_zoneidx,
1921 nodemask_t *nodemask, struct zone *preferred_zone,
1922 int migratetype)
1923{
1924 struct page *page;
1925
1926 do {
1927 page = get_page_from_freelist(gfp_mask, nodemask, order,
1928 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1929 preferred_zone, migratetype);
1930
1931 if (!page && gfp_mask & __GFP_NOFAIL)
1932 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
1933 } while (!page && (gfp_mask & __GFP_NOFAIL));
1934
1935 return page;
1936}
1937
1938static inline
1939void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1940 enum zone_type high_zoneidx,
1941 enum zone_type classzone_idx)
1942{
1943 struct zoneref *z;
1944 struct zone *zone;
1945
1946 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1947 wakeup_kswapd(zone, order, classzone_idx);
1948}
1949
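/*
 * Work out the internal ALLOC_* flags (watermark level, hardness, cpuset
 * checking) implied by the gfp mask of an allocation request.
 */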
1950static inline int
1951gfp_to_alloc_flags(gfp_t gfp_mask)
1952{
1953 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1954 const gfp_t wait = gfp_mask & __GFP_WAIT;
1955
1956
1957 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
1958
1959
1960
1961
1962
1963
1964
1965 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1966
1967 if (!wait) {
1968
1969
1970
1971
1972 if (!(gfp_mask & __GFP_NOMEMALLOC))
1973 alloc_flags |= ALLOC_HARDER;
1974
1975
1976
1977
1978 alloc_flags &= ~ALLOC_CPUSET;
1979 } else if (unlikely(rt_task(current)) && !in_interrupt())
1980 alloc_flags |= ALLOC_HARDER;
1981
1982 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1983 if (!in_interrupt() &&
1984 ((current->flags & PF_MEMALLOC) ||
1985 unlikely(test_thread_flag(TIF_MEMDIE))))
1986 alloc_flags |= ALLOC_NO_WATERMARKS;
1987 }
1988
1989 return alloc_flags;
1990}
1991
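/*
 * Slow path of the allocator: wake kswapd, retry with recomputed alloc
 * flags, then fall back to no-watermark allocation, direct compaction,
 * direct reclaim and, as a last resort, the OOM killer, as permitted by
 * the gfp flags of the request.
 */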
1992static inline struct page *
1993__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1994 struct zonelist *zonelist, enum zone_type high_zoneidx,
1995 nodemask_t *nodemask, struct zone *preferred_zone,
1996 int migratetype)
1997{
1998 const gfp_t wait = gfp_mask & __GFP_WAIT;
1999 struct page *page = NULL;
2000 int alloc_flags;
2001 unsigned long pages_reclaimed = 0;
2002 unsigned long did_some_progress;
2003 bool sync_migration = false;
2004
	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
2011 if (order >= MAX_ORDER) {
2012 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2013 return NULL;
2014 }
2015
	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
2024 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2025 goto nopage;
2026
2027restart:
2028 if (!(gfp_mask & __GFP_NO_KSWAPD))
2029 wake_all_kswapd(order, zonelist, high_zoneidx,
2030 zone_idx(preferred_zone));
2031
2032
2033
2034
2035
2036
2037 alloc_flags = gfp_to_alloc_flags(gfp_mask);
2038
2039
2040
2041
2042
2043 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2044 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2045 &preferred_zone);
2046
2047
2048 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2049 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2050 preferred_zone, migratetype);
2051 if (page)
2052 goto got_pg;
2053
2054rebalance:
2055
2056 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2057 page = __alloc_pages_high_priority(gfp_mask, order,
2058 zonelist, high_zoneidx, nodemask,
2059 preferred_zone, migratetype);
2060 if (page)
2061 goto got_pg;
2062 }
2063
2064
2065 if (!wait)
2066 goto nopage;
2067
2068
2069 if (current->flags & PF_MEMALLOC)
2070 goto nopage;
2071
2072
2073 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2074 goto nopage;
2075
2076
2077
2078
2079
2080 page = __alloc_pages_direct_compact(gfp_mask, order,
2081 zonelist, high_zoneidx,
2082 nodemask,
2083 alloc_flags, preferred_zone,
2084 migratetype, &did_some_progress,
2085 sync_migration);
2086 if (page)
2087 goto got_pg;
2088 sync_migration = true;
2089
2090
2091 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2092 zonelist, high_zoneidx,
2093 nodemask,
2094 alloc_flags, preferred_zone,
2095 migratetype, &did_some_progress);
2096 if (page)
2097 goto got_pg;
2098
2099
2100
2101
2102
2103 if (!did_some_progress) {
2104 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2105 if (oom_killer_disabled)
2106 goto nopage;
2107 page = __alloc_pages_may_oom(gfp_mask, order,
2108 zonelist, high_zoneidx,
2109 nodemask, preferred_zone,
2110 migratetype);
2111 if (page)
2112 goto got_pg;
2113
2114 if (!(gfp_mask & __GFP_NOFAIL)) {
2115
2116
2117
2118
2119
2120
2121 if (order > PAGE_ALLOC_COSTLY_ORDER)
2122 goto nopage;
2123
2124
2125
2126
2127
2128 if (high_zoneidx < ZONE_NORMAL)
2129 goto nopage;
2130 }
2131
2132 goto restart;
2133 }
2134 }
2135
2136
2137 pages_reclaimed += did_some_progress;
2138 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2139
2140 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2141 goto rebalance;
2142 } else {
2143
2144
2145
2146
2147
2148 page = __alloc_pages_direct_compact(gfp_mask, order,
2149 zonelist, high_zoneidx,
2150 nodemask,
2151 alloc_flags, preferred_zone,
2152 migratetype, &did_some_progress,
2153 sync_migration);
2154 if (page)
2155 goto got_pg;
2156 }
2157
2158nopage:
2159 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2160 printk(KERN_WARNING "%s: page allocation failure."
2161 " order:%d, mode:0x%x\n",
2162 current->comm, order, gfp_mask);
2163 dump_stack();
2164 show_mem();
2165 }
2166 return page;
2167got_pg:
2168 if (kmemcheck_enabled)
2169 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2170 return page;
2171
2172}
2173
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
2177struct page *
2178__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2179 struct zonelist *zonelist, nodemask_t *nodemask)
2180{
2181 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2182 struct zone *preferred_zone;
2183 struct page *page;
2184 int migratetype = allocflags_to_migratetype(gfp_mask);
2185
2186 gfp_mask &= gfp_allowed_mask;
2187
2188 lockdep_trace_alloc(gfp_mask);
2189
2190 might_sleep_if(gfp_mask & __GFP_WAIT);
2191
2192 if (should_fail_alloc_page(gfp_mask, order))
2193 return NULL;
2194
2195
2196
2197
2198
2199
2200 if (unlikely(!zonelist->_zonerefs->zone))
2201 return NULL;
2202
2203 get_mems_allowed();
2204
2205 first_zones_zonelist(zonelist, high_zoneidx,
2206 nodemask ? : &cpuset_current_mems_allowed,
2207 &preferred_zone);
2208 if (!preferred_zone) {
2209 put_mems_allowed();
2210 return NULL;
2211 }
2212
2213
2214 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2215 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2216 preferred_zone, migratetype);
2217 if (unlikely(!page))
2218 page = __alloc_pages_slowpath(gfp_mask, order,
2219 zonelist, high_zoneidx, nodemask,
2220 preferred_zone, migratetype);
2221 put_mems_allowed();
2222
2223 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2224 return page;
2225}
2226EXPORT_SYMBOL(__alloc_pages_nodemask);
2227
2228
2229
2230
2231unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2232{
2233 struct page *page;
2234
2235
2236
2237
2238
2239 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2240
2241 page = alloc_pages(gfp_mask, order);
2242 if (!page)
2243 return 0;
2244 return (unsigned long) page_address(page);
2245}
2246EXPORT_SYMBOL(__get_free_pages);
2247
2248unsigned long get_zeroed_page(gfp_t gfp_mask)
2249{
2250 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2251}
2252EXPORT_SYMBOL(get_zeroed_page);
2253
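/* Free every page held in a pagevec, honouring its hot/cold hint. */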
2254void __pagevec_free(struct pagevec *pvec)
2255{
2256 int i = pagevec_count(pvec);
2257
2258 while (--i >= 0) {
2259 trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2260 free_hot_cold_page(pvec->pages[i], pvec->cold);
2261 }
2262}
2263
2264void __free_pages(struct page *page, unsigned int order)
2265{
2266 if (put_page_testzero(page)) {
2267 if (order == 0)
2268 free_hot_cold_page(page, 0);
2269 else
2270 __free_pages_ok(page, order);
2271 }
2272}
2273
2274EXPORT_SYMBOL(__free_pages);
2275
2276void free_pages(unsigned long addr, unsigned int order)
2277{
2278 if (addr != 0) {
2279 VM_BUG_ON(!virt_addr_valid((void *)addr));
2280 __free_pages(virt_to_page((void *)addr), order);
2281 }
2282}
2283
2284EXPORT_SYMBOL(free_pages);
2285
/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
2299void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2300{
2301 unsigned int order = get_order(size);
2302 unsigned long addr;
2303
2304 addr = __get_free_pages(gfp_mask, order);
2305 if (addr) {
2306 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2307 unsigned long used = addr + PAGE_ALIGN(size);
2308
2309 split_page(virt_to_page((void *)addr), order);
2310 while (used < alloc_end) {
2311 free_page(used);
2312 used += PAGE_SIZE;
2313 }
2314 }
2315
2316 return (void *)addr;
2317}
2318EXPORT_SYMBOL(alloc_pages_exact);
2319
/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
2327void free_pages_exact(void *virt, size_t size)
2328{
2329 unsigned long addr = (unsigned long)virt;
2330 unsigned long end = addr + PAGE_ALIGN(size);
2331
2332 while (addr < end) {
2333 free_page(addr);
2334 addr += PAGE_SIZE;
2335 }
2336}
2337EXPORT_SYMBOL(free_pages_exact);
2338
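/*
 * Walk the local node's GFP_KERNEL zonelist up to the given zone offset and
 * sum, for each zone, the pages above its high watermark: an estimate of
 * how much can still be allocated without pushing any zone below that mark.
 */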
2339static unsigned int nr_free_zone_pages(int offset)
2340{
2341 struct zoneref *z;
2342 struct zone *zone;
2343
2344
2345 unsigned int sum = 0;
2346
2347 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2348
2349 for_each_zone_zonelist(zone, z, zonelist, offset) {
2350 unsigned long size = zone->present_pages;
2351 unsigned long high = high_wmark_pages(zone);
2352 if (size > high)
2353 sum += size - high;
2354 }
2355
2356 return sum;
2357}
2358
2359
2360
2361
2362unsigned int nr_free_buffer_pages(void)
2363{
2364 return nr_free_zone_pages(gfp_zone(GFP_USER));
2365}
2366EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2367
2368
2369
2370
2371unsigned int nr_free_pagecache_pages(void)
2372{
2373 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2374}
2375
2376static inline void show_node(struct zone *zone)
2377{
2378 if (NUMA_BUILD)
2379 printk("Node %d ", zone_to_nid(zone));
2380}
2381
2382void si_meminfo(struct sysinfo *val)
2383{
2384 val->totalram = totalram_pages;
2385 val->sharedram = 0;
2386 val->freeram = global_page_state(NR_FREE_PAGES);
2387 val->bufferram = nr_blockdev_pages();
2388 val->totalhigh = totalhigh_pages;
2389 val->freehigh = nr_free_highpages();
2390 val->mem_unit = PAGE_SIZE;
2391}
2392
2393EXPORT_SYMBOL(si_meminfo);
2394
2395#ifdef CONFIG_NUMA
2396void si_meminfo_node(struct sysinfo *val, int nid)
2397{
2398 pg_data_t *pgdat = NODE_DATA(nid);
2399
2400 val->totalram = pgdat->node_present_pages;
2401 val->freeram = node_page_state(nid, NR_FREE_PAGES);
2402#ifdef CONFIG_HIGHMEM
2403 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2404 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2405 NR_FREE_PAGES);
2406#else
2407 val->totalhigh = 0;
2408 val->freehigh = 0;
2409#endif
2410 val->mem_unit = PAGE_SIZE;
2411}
2412#endif
2413
2414#define K(x) ((x) << (PAGE_SHIFT-10))
2415
/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
2421void show_free_areas(void)
2422{
2423 int cpu;
2424 struct zone *zone;
2425
2426 for_each_populated_zone(zone) {
2427 show_node(zone);
2428 printk("%s per-cpu:\n", zone->name);
2429
2430 for_each_online_cpu(cpu) {
2431 struct per_cpu_pageset *pageset;
2432
2433 pageset = per_cpu_ptr(zone->pageset, cpu);
2434
2435 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2436 cpu, pageset->pcp.high,
2437 pageset->pcp.batch, pageset->pcp.count);
2438 }
2439 }
2440
2441 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2442 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2443 " unevictable:%lu"
2444 " dirty:%lu writeback:%lu unstable:%lu\n"
2445 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2446 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2447 global_page_state(NR_ACTIVE_ANON),
2448 global_page_state(NR_INACTIVE_ANON),
2449 global_page_state(NR_ISOLATED_ANON),
2450 global_page_state(NR_ACTIVE_FILE),
2451 global_page_state(NR_INACTIVE_FILE),
2452 global_page_state(NR_ISOLATED_FILE),
2453 global_page_state(NR_UNEVICTABLE),
2454 global_page_state(NR_FILE_DIRTY),
2455 global_page_state(NR_WRITEBACK),
2456 global_page_state(NR_UNSTABLE_NFS),
2457 global_page_state(NR_FREE_PAGES),
2458 global_page_state(NR_SLAB_RECLAIMABLE),
2459 global_page_state(NR_SLAB_UNRECLAIMABLE),
2460 global_page_state(NR_FILE_MAPPED),
2461 global_page_state(NR_SHMEM),
2462 global_page_state(NR_PAGETABLE),
2463 global_page_state(NR_BOUNCE));
2464
2465 for_each_populated_zone(zone) {
2466 int i;
2467
2468 show_node(zone);
2469 printk("%s"
2470 " free:%lukB"
2471 " min:%lukB"
2472 " low:%lukB"
2473 " high:%lukB"
2474 " active_anon:%lukB"
2475 " inactive_anon:%lukB"
2476 " active_file:%lukB"
2477 " inactive_file:%lukB"
2478 " unevictable:%lukB"
2479 " isolated(anon):%lukB"
2480 " isolated(file):%lukB"
2481 " present:%lukB"
2482 " mlocked:%lukB"
2483 " dirty:%lukB"
2484 " writeback:%lukB"
2485 " mapped:%lukB"
2486 " shmem:%lukB"
2487 " slab_reclaimable:%lukB"
2488 " slab_unreclaimable:%lukB"
2489 " kernel_stack:%lukB"
2490 " pagetables:%lukB"
2491 " unstable:%lukB"
2492 " bounce:%lukB"
2493 " writeback_tmp:%lukB"
2494 " pages_scanned:%lu"
2495 " all_unreclaimable? %s"
2496 "\n",
2497 zone->name,
2498 K(zone_page_state(zone, NR_FREE_PAGES)),
2499 K(min_wmark_pages(zone)),
2500 K(low_wmark_pages(zone)),
2501 K(high_wmark_pages(zone)),
2502 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2503 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2504 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2505 K(zone_page_state(zone, NR_INACTIVE_FILE)),
2506 K(zone_page_state(zone, NR_UNEVICTABLE)),
2507 K(zone_page_state(zone, NR_ISOLATED_ANON)),
2508 K(zone_page_state(zone, NR_ISOLATED_FILE)),
2509 K(zone->present_pages),
2510 K(zone_page_state(zone, NR_MLOCK)),
2511 K(zone_page_state(zone, NR_FILE_DIRTY)),
2512 K(zone_page_state(zone, NR_WRITEBACK)),
2513 K(zone_page_state(zone, NR_FILE_MAPPED)),
2514 K(zone_page_state(zone, NR_SHMEM)),
2515 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2516 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2517 zone_page_state(zone, NR_KERNEL_STACK) *
2518 THREAD_SIZE / 1024,
2519 K(zone_page_state(zone, NR_PAGETABLE)),
2520 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2521 K(zone_page_state(zone, NR_BOUNCE)),
2522 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2523 zone->pages_scanned,
2524 (zone->all_unreclaimable ? "yes" : "no")
2525 );
2526 printk("lowmem_reserve[]:");
2527 for (i = 0; i < MAX_NR_ZONES; i++)
2528 printk(" %lu", zone->lowmem_reserve[i]);
2529 printk("\n");
2530 }
2531
2532 for_each_populated_zone(zone) {
2533 unsigned long nr[MAX_ORDER], flags, order, total = 0;
2534
2535 show_node(zone);
2536 printk("%s: ", zone->name);
2537
2538 spin_lock_irqsave(&zone->lock, flags);
2539 for (order = 0; order < MAX_ORDER; order++) {
2540 nr[order] = zone->free_area[order].nr_free;
2541 total += nr[order] << order;
2542 }
2543 spin_unlock_irqrestore(&zone->lock, flags);
2544 for (order = 0; order < MAX_ORDER; order++)
2545 printk("%lu*%lukB ", nr[order], K(1UL) << order);
2546 printk("= %lukB\n", K(total));
2547 }
2548
2549 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2550
2551 show_swap_cache_info();
2552}
2553
2554static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2555{
2556 zoneref->zone = zone;
2557 zoneref->zone_idx = zone_idx(zone);
2558}
2559
/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
2565static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2566 int nr_zones, enum zone_type zone_type)
2567{
2568 struct zone *zone;
2569
2570 BUG_ON(zone_type >= MAX_NR_ZONES);
2571 zone_type++;
2572
2573 do {
2574 zone_type--;
2575 zone = pgdat->node_zones + zone_type;
2576 if (populated_zone(zone)) {
2577 zoneref_set_zone(zone,
2578 &zonelist->_zonerefs[nr_zones++]);
2579 check_highest_zone(zone_type);
2580 }
2581
2582 } while (zone_type);
2583 return nr_zones;
2584}
2585
/*
 *  zonelist_order:
 *  0 = automatic detection of better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 *  the same zonelist. So only NUMA can configure this param.
 */
2596#define ZONELIST_ORDER_DEFAULT 0
2597#define ZONELIST_ORDER_NODE 1
2598#define ZONELIST_ORDER_ZONE 2
2599
2600
2601
2602
2603static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2604static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2605
2606
2607#ifdef CONFIG_NUMA
2608
2609static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2610
2611#define NUMA_ZONELIST_ORDER_LEN 16
2612char numa_zonelist_order[16] = "default";
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622static int __parse_numa_zonelist_order(char *s)
2623{
2624 if (*s == 'd' || *s == 'D') {
2625 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2626 } else if (*s == 'n' || *s == 'N') {
2627 user_zonelist_order = ZONELIST_ORDER_NODE;
2628 } else if (*s == 'z' || *s == 'Z') {
2629 user_zonelist_order = ZONELIST_ORDER_ZONE;
2630 } else {
2631 printk(KERN_WARNING
2632 "Ignoring invalid numa_zonelist_order value: "
2633 "%s\n", s);
2634 return -EINVAL;
2635 }
2636 return 0;
2637}
2638
2639static __init int setup_numa_zonelist_order(char *s)
2640{
2641 int ret;
2642
2643 if (!s)
2644 return 0;
2645
2646 ret = __parse_numa_zonelist_order(s);
2647 if (ret == 0)
2648 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2649
2650 return ret;
2651}
2652early_param("numa_zonelist_order", setup_numa_zonelist_order);
2653
2654
2655
2656
2657int numa_zonelist_order_handler(ctl_table *table, int write,
2658 void __user *buffer, size_t *length,
2659 loff_t *ppos)
2660{
2661 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2662 int ret;
2663 static DEFINE_MUTEX(zl_order_mutex);
2664
2665 mutex_lock(&zl_order_mutex);
2666 if (write)
2667 strcpy(saved_string, (char*)table->data);
2668 ret = proc_dostring(table, write, buffer, length, ppos);
2669 if (ret)
2670 goto out;
2671 if (write) {
2672 int oldval = user_zonelist_order;
2673 if (__parse_numa_zonelist_order((char*)table->data)) {
2674
2675
2676
2677 strncpy((char*)table->data, saved_string,
2678 NUMA_ZONELIST_ORDER_LEN);
2679 user_zonelist_order = oldval;
2680 } else if (oldval != user_zonelist_order) {
2681 mutex_lock(&zonelists_mutex);
2682 build_all_zonelists(NULL);
2683 mutex_unlock(&zonelists_mutex);
2684 }
2685 }
2686out:
2687 mutex_unlock(&zl_order_mutex);
2688 return ret;
2689}
2690
2691
2692#define MAX_NODE_LOAD (nr_online_nodes)
2693static int node_load[MAX_NUMNODES];
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
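/*
 * Descriptive comment (added): pick the best node not yet in the zonelist.
 * The local node is taken first.  After that the remaining node with the
 * lowest cost wins, where the cost is the NUMA distance from @node, a
 * one-unit penalty for nodes numbered below @node (prefer the "next"
 * node), PENALTY_FOR_NODE_WITH_CPUS for nodes that have CPUs (so
 * headless memory-only nodes are consumed first), and the node_load[]
 * term that build_zonelists() uses to round-robin nodes within the same
 * distance group.  The chosen node is set in @used_node_mask; -1 is
 * returned when every node has been used.
 */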
2709static int find_next_best_node(int node, nodemask_t *used_node_mask)
2710{
2711 int n, val;
2712 int min_val = INT_MAX;
2713 int best_node = -1;
2714 const struct cpumask *tmp = cpumask_of_node(0);
2715
2716
2717 if (!node_isset(node, *used_node_mask)) {
2718 node_set(node, *used_node_mask);
2719 return node;
2720 }
2721
2722 for_each_node_state(n, N_HIGH_MEMORY) {
2723
2724
2725 if (node_isset(n, *used_node_mask))
2726 continue;
2727
2728
2729 val = node_distance(node, n);
2730
2731
2732 val += (n < node);
2733
2734
2735 tmp = cpumask_of_node(n);
2736 if (!cpumask_empty(tmp))
2737 val += PENALTY_FOR_NODE_WITH_CPUS;
2738
2739
2740 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2741 val += node_load[n];
2742
2743 if (val < min_val) {
2744 min_val = val;
2745 best_node = n;
2746 }
2747 }
2748
2749 if (best_node >= 0)
2750 node_set(best_node, *used_node_mask);
2751
2752 return best_node;
2753}
2754
2755
2756
2757
2758
2759
2760
2761static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2762{
2763 int j;
2764 struct zonelist *zonelist;
2765
2766 zonelist = &pgdat->node_zonelists[0];
2767 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2768 ;
2769 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2770 MAX_NR_ZONES - 1);
2771 zonelist->_zonerefs[j].zone = NULL;
2772 zonelist->_zonerefs[j].zone_idx = 0;
2773}
2774
2775
2776
2777
2778static void build_thisnode_zonelists(pg_data_t *pgdat)
2779{
2780 int j;
2781 struct zonelist *zonelist;
2782
2783 zonelist = &pgdat->node_zonelists[1];
2784 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2785 zonelist->_zonerefs[j].zone = NULL;
2786 zonelist->_zonerefs[j].zone_idx = 0;
2787}
2788
2789
2790
2791
2792
2793
2794
2795static int node_order[MAX_NUMNODES];
2796
2797static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2798{
2799 int pos, j, node;
2800 int zone_type;
2801 struct zone *z;
2802 struct zonelist *zonelist;
2803
2804 zonelist = &pgdat->node_zonelists[0];
2805 pos = 0;
2806 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2807 for (j = 0; j < nr_nodes; j++) {
2808 node = node_order[j];
2809 z = &NODE_DATA(node)->node_zones[zone_type];
2810 if (populated_zone(z)) {
2811 zoneref_set_zone(z,
2812 &zonelist->_zonerefs[pos++]);
2813 check_highest_zone(zone_type);
2814 }
2815 }
2816 }
2817 zonelist->_zonerefs[pos].zone = NULL;
2818 zonelist->_zonerefs[pos].zone_idx = 0;
2819}
2820
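/*
 * Descriptive comment (added): choose node vs. zone ordering automatically.
 * If some node consists only of DMA/DMA32 memory, or low memory makes up
 * more than half of the machine (or a node is dominated by it), node
 * order is used for locality; otherwise zone order is used so that the
 * scarce low-memory zones are not exhausted by remote allocations.
 */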
2821static int default_zonelist_order(void)
2822{
2823 int nid, zone_type;
2824 unsigned long low_kmem_size, total_size;
2825 struct zone *z;
2826 int average_size;
2827
2828
2829
2830
2831
2832
2833
2834 low_kmem_size = 0;
2835 total_size = 0;
2836 for_each_online_node(nid) {
2837 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2838 z = &NODE_DATA(nid)->node_zones[zone_type];
2839 if (populated_zone(z)) {
2840 if (zone_type < ZONE_NORMAL)
2841 low_kmem_size += z->present_pages;
2842 total_size += z->present_pages;
2843 } else if (zone_type == ZONE_NORMAL) {
2844
2845
2846
2847
2848
2849
2850
2851 return ZONELIST_ORDER_NODE;
2852 }
2853 }
2854 }
2855 if (!low_kmem_size ||
2856 low_kmem_size > total_size/2)
2857 return ZONELIST_ORDER_NODE;
2858
2859
2860
2861
2862
2863 average_size = total_size /
2864 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2865 for_each_online_node(nid) {
2866 low_kmem_size = 0;
2867 total_size = 0;
2868 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2869 z = &NODE_DATA(nid)->node_zones[zone_type];
2870 if (populated_zone(z)) {
2871 if (zone_type < ZONE_NORMAL)
2872 low_kmem_size += z->present_pages;
2873 total_size += z->present_pages;
2874 }
2875 }
2876 if (low_kmem_size &&
2877 total_size > average_size &&
2878 low_kmem_size > total_size * 70/100)
2879 return ZONELIST_ORDER_NODE;
2880 }
2881 return ZONELIST_ORDER_ZONE;
2882}
2883
2884static void set_zonelist_order(void)
2885{
2886 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2887 current_zonelist_order = default_zonelist_order();
2888 else
2889 current_zonelist_order = user_zonelist_order;
2890}
2891
2892static void build_zonelists(pg_data_t *pgdat)
2893{
2894 int j, node, load;
2895 enum zone_type i;
2896 nodemask_t used_mask;
2897 int local_node, prev_node;
2898 struct zonelist *zonelist;
2899 int order = current_zonelist_order;
2900
2901
2902 for (i = 0; i < MAX_ZONELISTS; i++) {
2903 zonelist = pgdat->node_zonelists + i;
2904 zonelist->_zonerefs[0].zone = NULL;
2905 zonelist->_zonerefs[0].zone_idx = 0;
2906 }
2907
2908
2909 local_node = pgdat->node_id;
2910 load = nr_online_nodes;
2911 prev_node = local_node;
2912 nodes_clear(used_mask);
2913
2914 memset(node_order, 0, sizeof(node_order));
2915 j = 0;
2916
2917 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2918 int distance = node_distance(local_node, node);
2919
2920
2921
2922
2923
2924 if (distance > RECLAIM_DISTANCE)
2925 zone_reclaim_mode = 1;
2926
2927
2928
2929
2930
2931
2932 if (distance != node_distance(local_node, prev_node))
2933 node_load[node] = load;
2934
2935 prev_node = node;
2936 load--;
2937 if (order == ZONELIST_ORDER_NODE)
2938 build_zonelists_in_node_order(pgdat, node);
2939 else
2940 node_order[j++] = node;
2941 }
2942
2943 if (order == ZONELIST_ORDER_ZONE) {
2944
2945 build_zonelists_in_zone_order(pgdat, j);
2946 }
2947
2948 build_thisnode_zonelists(pgdat);
2949}
2950
2951
2952static void build_zonelist_cache(pg_data_t *pgdat)
2953{
2954 struct zonelist *zonelist;
2955 struct zonelist_cache *zlc;
2956 struct zoneref *z;
2957
2958 zonelist = &pgdat->node_zonelists[0];
2959 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2960 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2961 for (z = zonelist->_zonerefs; z->zone; z++)
2962 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2963}
2964
2965#ifdef CONFIG_HAVE_MEMORYLESS_NODES
2966
2967
2968
2969
2970
2971
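/*
 * Descriptive comment (added): return the node of the first zone in
 * @node's GFP_KERNEL zonelist, i.e. the nearest node that actually has
 * memory.  Used to set up numa_mem_id() for CPUs on memoryless nodes.
 */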
2972int local_memory_node(int node)
2973{
2974 struct zone *zone;
2975
2976 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
2977 gfp_zone(GFP_KERNEL),
2978 NULL,
2979 &zone);
2980 return zone->node;
2981}
2982#endif
2983
2984#else
2985
2986static void set_zonelist_order(void)
2987{
2988 current_zonelist_order = ZONELIST_ORDER_ZONE;
2989}
2990
2991static void build_zonelists(pg_data_t *pgdat)
2992{
2993 int node, local_node;
2994 enum zone_type j;
2995 struct zonelist *zonelist;
2996
2997 local_node = pgdat->node_id;
2998
2999 zonelist = &pgdat->node_zonelists[0];
3000 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3011 if (!node_online(node))
3012 continue;
3013 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3014 MAX_NR_ZONES - 1);
3015 }
3016 for (node = 0; node < local_node; node++) {
3017 if (!node_online(node))
3018 continue;
3019 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3020 MAX_NR_ZONES - 1);
3021 }
3022
3023 zonelist->_zonerefs[j].zone = NULL;
3024 zonelist->_zonerefs[j].zone_idx = 0;
3025}
3026
3027
3028static void build_zonelist_cache(pg_data_t *pgdat)
3029{
3030 pgdat->node_zonelists[0].zlcache_ptr = NULL;
3031}
3032
3033#endif
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3051static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3052static void setup_zone_pageset(struct zone *zone);
3053
3054
3055
3056
3057
3058DEFINE_MUTEX(zonelists_mutex);
3059
3060
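/*
 * Descriptive comment (added): rebuild the zonelists and zonelist caches
 * of every online node, and initialise each possible CPU's boot_pageset
 * with a batch size of 0 so the allocator works before the real per-cpu
 * pagesets exist.  Runs at boot, or under stop_machine() on hotplug.
 */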
3061static __init_refok int __build_all_zonelists(void *data)
3062{
3063 int nid;
3064 int cpu;
3065
3066#ifdef CONFIG_NUMA
3067 memset(node_load, 0, sizeof(node_load));
3068#endif
3069 for_each_online_node(nid) {
3070 pg_data_t *pgdat = NODE_DATA(nid);
3071
3072 build_zonelists(pgdat);
3073 build_zonelist_cache(pgdat);
3074 }
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089 for_each_possible_cpu(cpu) {
3090 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3091
3092#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3093
3094
3095
3096
3097
3098
3099
3100
3101 if (cpu_online(cpu))
3102 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3103#endif
3104 }
3105
3106 return 0;
3107}
3108
3109
3110
3111
3112
3113void build_all_zonelists(void *data)
3114{
3115 set_zonelist_order();
3116
3117 if (system_state == SYSTEM_BOOTING) {
3118 __build_all_zonelists(NULL);
3119 mminit_verify_zonelist();
3120 cpuset_init_current_mems_allowed();
3121 } else {
3122
3123
3124#ifdef CONFIG_MEMORY_HOTPLUG
3125 if (data)
3126 setup_zone_pageset((struct zone *)data);
3127#endif
3128 stop_machine(__build_all_zonelists, NULL, NULL);
3129
3130 }
3131 vm_total_pages = nr_free_pagecache_pages();
3132
3133
3134
3135
3136
3137
3138
3139 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3140 page_group_by_mobility_disabled = 1;
3141 else
3142 page_group_by_mobility_disabled = 0;
3143
3144 printk("Built %i zonelists in %s order, mobility grouping %s. "
3145 "Total pages: %ld\n",
3146 nr_online_nodes,
3147 zonelist_order_name[current_zonelist_order],
3148 page_group_by_mobility_disabled ? "off" : "on",
3149 vm_total_pages);
3150#ifdef CONFIG_NUMA
3151 printk("Policy zone: %s\n", zone_names[policy_zone]);
3152#endif
3153}
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
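/*
 * Descriptive comment (added): each zone has a hash table of wait queues
 * used by callers waiting on page flags (lock/writeback).  It is sized
 * below at roughly one queue per PAGES_PER_WAITQUEUE pages, rounded up
 * to a power of two and clamped to 4..4096 entries; with memory hotplug
 * enabled the maximum size is always used because the zone may grow.
 */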
3166#define PAGES_PER_WAITQUEUE 256
3167
3168#ifndef CONFIG_MEMORY_HOTPLUG
3169static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3170{
3171 unsigned long size = 1;
3172
3173 pages /= PAGES_PER_WAITQUEUE;
3174
3175 while (size < pages)
3176 size <<= 1;
3177
3178
3179
3180
3181
3182
3183 size = min(size, 4096UL);
3184
3185 return max(size, 4UL);
3186}
3187#else
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3206{
3207 return 4096UL;
3208}
3209#endif
3210
3211
3212
3213
3214
3215
3216static inline unsigned long wait_table_bits(unsigned long size)
3217{
3218 return ffz(~size);
3219}
3220
3221#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3222
3223
3224
3225
3226
3227
3228
3229
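/*
 * Descriptive comment (added): mark pageblocks near the start of the
 * zone as MIGRATE_RESERVE.  The number of blocks is derived from the min
 * watermark and capped at 2, giving high-order atomic allocations a
 * fallback area that is kept as free as possible.  Surplus blocks that
 * were previously MIGRATE_RESERVE are returned to MIGRATE_MOVABLE.
 */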
3230static void setup_zone_migrate_reserve(struct zone *zone)
3231{
3232 unsigned long start_pfn, pfn, end_pfn;
3233 struct page *page;
3234 unsigned long block_migratetype;
3235 int reserve;
3236
3237
3238 start_pfn = zone->zone_start_pfn;
3239 end_pfn = start_pfn + zone->spanned_pages;
3240 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3241 pageblock_order;
3242
3243
3244
3245
3246
3247
3248
3249
3250 reserve = min(2, reserve);
3251
3252 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3253 if (!pfn_valid(pfn))
3254 continue;
3255 page = pfn_to_page(pfn);
3256
3257
3258 if (page_to_nid(page) != zone_to_nid(zone))
3259 continue;
3260
3261
3262 if (PageReserved(page))
3263 continue;
3264
3265 block_migratetype = get_pageblock_migratetype(page);
3266
3267
3268 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3269 reserve--;
3270 continue;
3271 }
3272
3273
3274 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3275 set_pageblock_migratetype(page, MIGRATE_RESERVE);
3276 move_freepages_block(zone, page, MIGRATE_RESERVE);
3277 reserve--;
3278 continue;
3279 }
3280
3281
3282
3283
3284
3285 if (block_migratetype == MIGRATE_RESERVE) {
3286 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3287 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3288 }
3289 }
3290}
3291
3292
3293
3294
3295
3296
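/*
 * Descriptive comment (added): initialise the struct pages for a PFN
 * range of a zone: zone/node links, initial reference count, the
 * Reserved bit, and MIGRATE_MOVABLE for the first page of each aligned
 * pageblock inside the zone.  For MEMMAP_EARLY, PFNs that are invalid
 * or belong to another node are skipped.
 */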
3297void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3298 unsigned long start_pfn, enum memmap_context context)
3299{
3300 struct page *page;
3301 unsigned long end_pfn = start_pfn + size;
3302 unsigned long pfn;
3303 struct zone *z;
3304
3305 if (highest_memmap_pfn < end_pfn - 1)
3306 highest_memmap_pfn = end_pfn - 1;
3307
3308 z = &NODE_DATA(nid)->node_zones[zone];
3309 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3310
3311
3312
3313
3314
3315 if (context == MEMMAP_EARLY) {
3316 if (!early_pfn_valid(pfn))
3317 continue;
3318 if (!early_pfn_in_nid(pfn, nid))
3319 continue;
3320 }
3321 page = pfn_to_page(pfn);
3322 set_page_links(page, zone, nid, pfn);
3323 mminit_verify_page_links(page, zone, nid, pfn);
3324 init_page_count(page);
3325 reset_page_mapcount(page);
3326 SetPageReserved(page);
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341 if ((z->zone_start_pfn <= pfn)
3342 && (pfn < z->zone_start_pfn + z->spanned_pages)
3343 && !(pfn & (pageblock_nr_pages - 1)))
3344 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3345
3346 INIT_LIST_HEAD(&page->lru);
3347#ifdef WANT_PAGE_VIRTUAL
3348
3349 if (!is_highmem_idx(zone))
3350 set_page_address(page, __va(pfn << PAGE_SHIFT));
3351#endif
3352 }
3353}
3354
3355static void __meminit zone_init_free_lists(struct zone *zone)
3356{
3357 int order, t;
3358 for_each_migratetype_order(order, t) {
3359 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3360 zone->free_area[order].nr_free = 0;
3361 }
3362}
3363
3364#ifndef __HAVE_ARCH_MEMMAP_INIT
3365#define memmap_init(size, nid, zone, start_pfn) \
3366 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3367#endif
3368
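/*
 * Descriptive comment (added): per-cpu pageset batch size.  Roughly
 * 1/1024th of the zone, capped at 512KB worth of pages, divided by four,
 * then rounded to a value of the form 2^n - 1, which tends to have
 * better cache-aliasing behaviour than an exact power of two.  Without
 * an MMU the per-cpu lists are disabled entirely (batch = 0).
 */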
3369static int zone_batchsize(struct zone *zone)
3370{
3371#ifdef CONFIG_MMU
3372 int batch;
3373
3374
3375
3376
3377
3378
3379
3380 batch = zone->present_pages / 1024;
3381 if (batch * PAGE_SIZE > 512 * 1024)
3382 batch = (512 * 1024) / PAGE_SIZE;
3383 batch /= 4;
3384 if (batch < 1)
3385 batch = 1;
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397 batch = rounddown_pow_of_two(batch + batch/2) - 1;
3398
3399 return batch;
3400
3401#else
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415 return 0;
3416#endif
3417}
3418
3419static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3420{
3421 struct per_cpu_pages *pcp;
3422 int migratetype;
3423
3424 memset(p, 0, sizeof(*p));
3425
3426 pcp = &p->pcp;
3427 pcp->count = 0;
3428 pcp->high = 6 * batch;
3429 pcp->batch = max(1UL, 1 * batch);
3430 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3431 INIT_LIST_HEAD(&pcp->lists[migratetype]);
3432}
3433
3434
3435
3436
3437
3438
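/*
 * Descriptive comment (added): set the high-water mark of a per-cpu
 * pageset to @high and its batch to high/4, bounded above by
 * PAGE_SHIFT * 8 pages.  Used by the percpu_pagelist_fraction sysctl.
 */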
3439static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3440 unsigned long high)
3441{
3442 struct per_cpu_pages *pcp;
3443
3444 pcp = &p->pcp;
3445 pcp->high = high;
3446 pcp->batch = max(1UL, high/4);
3447 if ((high/4) > (PAGE_SHIFT * 8))
3448 pcp->batch = PAGE_SHIFT * 8;
3449}
3450
3451static __meminit void setup_zone_pageset(struct zone *zone)
3452{
3453 int cpu;
3454
3455 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3456
3457 for_each_possible_cpu(cpu) {
3458 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3459
3460 setup_pageset(pcp, zone_batchsize(zone));
3461
3462 if (percpu_pagelist_fraction)
3463 setup_pagelist_highmark(pcp,
3464 (zone->present_pages /
3465 percpu_pagelist_fraction));
3466 }
3467}
3468
3469
3470
3471
3472
3473void __init setup_per_cpu_pageset(void)
3474{
3475 struct zone *zone;
3476
3477 for_each_populated_zone(zone)
3478 setup_zone_pageset(zone);
3479}
3480
3481static noinline __init_refok
3482int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3483{
3484 int i;
3485 struct pglist_data *pgdat = zone->zone_pgdat;
3486 size_t alloc_size;
3487
3488
3489
3490
3491
3492 zone->wait_table_hash_nr_entries =
3493 wait_table_hash_nr_entries(zone_size_pages);
3494 zone->wait_table_bits =
3495 wait_table_bits(zone->wait_table_hash_nr_entries);
3496 alloc_size = zone->wait_table_hash_nr_entries
3497 * sizeof(wait_queue_head_t);
3498
3499 if (!slab_is_available()) {
3500 zone->wait_table = (wait_queue_head_t *)
3501 alloc_bootmem_node(pgdat, alloc_size);
3502 } else {
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513 zone->wait_table = vmalloc(alloc_size);
3514 }
3515 if (!zone->wait_table)
3516 return -ENOMEM;
3517
3518 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3519 init_waitqueue_head(zone->wait_table + i);
3520
3521 return 0;
3522}
3523
3524static int __zone_pcp_update(void *data)
3525{
3526 struct zone *zone = data;
3527 int cpu;
3528 unsigned long batch = zone_batchsize(zone), flags;
3529
3530 for_each_possible_cpu(cpu) {
3531 struct per_cpu_pageset *pset;
3532 struct per_cpu_pages *pcp;
3533
3534 pset = per_cpu_ptr(zone->pageset, cpu);
3535 pcp = &pset->pcp;
3536
3537 local_irq_save(flags);
3538 free_pcppages_bulk(zone, pcp->count, pcp);
3539 setup_pageset(pset, batch);
3540 local_irq_restore(flags);
3541 }
3542 return 0;
3543}
3544
3545void zone_pcp_update(struct zone *zone)
3546{
3547 stop_machine(__zone_pcp_update, zone, NULL);
3548}
3549
3550static __meminit void zone_pcp_init(struct zone *zone)
3551{
3552
3553
3554
3555
3556
3557 zone->pageset = &boot_pageset;
3558
3559 if (zone->present_pages)
3560 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3561 zone->name, zone->present_pages,
3562 zone_batchsize(zone));
3563}
3564
3565__meminit int init_currently_empty_zone(struct zone *zone,
3566 unsigned long zone_start_pfn,
3567 unsigned long size,
3568 enum memmap_context context)
3569{
3570 struct pglist_data *pgdat = zone->zone_pgdat;
3571 int ret;
3572 ret = zone_wait_table_init(zone, size);
3573 if (ret)
3574 return ret;
3575 pgdat->nr_zones = zone_idx(zone) + 1;
3576
3577 zone->zone_start_pfn = zone_start_pfn;
3578
3579 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3580 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3581 pgdat->node_id,
3582 (unsigned long)zone_idx(zone),
3583 zone_start_pfn, (zone_start_pfn + size));
3584
3585 zone_init_free_lists(zone);
3586
3587 return 0;
3588}
3589
3590#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3591
3592
3593
3594
3595static int __meminit first_active_region_index_in_nid(int nid)
3596{
3597 int i;
3598
3599 for (i = 0; i < nr_nodemap_entries; i++)
3600 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3601 return i;
3602
3603 return -1;
3604}
3605
3606
3607
3608
3609
3610static int __meminit next_active_region_index_in_nid(int index, int nid)
3611{
3612 for (index = index + 1; index < nr_nodemap_entries; index++)
3613 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3614 return index;
3615
3616 return -1;
3617}
3618
3619#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3620
3621
3622
3623
3624
3625
3626int __meminit __early_pfn_to_nid(unsigned long pfn)
3627{
3628 int i;
3629
3630 for (i = 0; i < nr_nodemap_entries; i++) {
3631 unsigned long start_pfn = early_node_map[i].start_pfn;
3632 unsigned long end_pfn = early_node_map[i].end_pfn;
3633
3634 if (start_pfn <= pfn && pfn < end_pfn)
3635 return early_node_map[i].nid;
3636 }
3637
3638 return -1;
3639}
3640#endif
3641
3642int __meminit early_pfn_to_nid(unsigned long pfn)
3643{
3644 int nid;
3645
3646 nid = __early_pfn_to_nid(pfn);
3647 if (nid >= 0)
3648 return nid;
3649
3650 return 0;
3651}
3652
3653#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3654bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3655{
3656 int nid;
3657
3658 nid = __early_pfn_to_nid(pfn);
3659 if (nid >= 0 && nid != node)
3660 return false;
3661 return true;
3662}
3663#endif
3664
3665
3666#define for_each_active_range_index_in_nid(i, nid) \
3667 for (i = first_active_region_index_in_nid(nid); i != -1; \
3668 i = next_active_region_index_in_nid(i, nid))
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679void __init free_bootmem_with_active_regions(int nid,
3680 unsigned long max_low_pfn)
3681{
3682 int i;
3683
3684 for_each_active_range_index_in_nid(i, nid) {
3685 unsigned long size_pages = 0;
3686 unsigned long end_pfn = early_node_map[i].end_pfn;
3687
3688 if (early_node_map[i].start_pfn >= max_low_pfn)
3689 continue;
3690
3691 if (end_pfn > max_low_pfn)
3692 end_pfn = max_low_pfn;
3693
3694 size_pages = end_pfn - early_node_map[i].start_pfn;
3695 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3696 PFN_PHYS(early_node_map[i].start_pfn),
3697 size_pages << PAGE_SHIFT);
3698 }
3699}
3700
3701#ifdef CONFIG_HAVE_MEMBLOCK
3702u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3703 u64 goal, u64 limit)
3704{
3705 int i;
3706
3707
3708 for_each_active_range_index_in_nid(i, nid) {
3709 u64 addr;
3710 u64 ei_start, ei_last;
3711 u64 final_start, final_end;
3712
3713 ei_last = early_node_map[i].end_pfn;
3714 ei_last <<= PAGE_SHIFT;
3715 ei_start = early_node_map[i].start_pfn;
3716 ei_start <<= PAGE_SHIFT;
3717
3718 final_start = max(ei_start, goal);
3719 final_end = min(ei_last, limit);
3720
3721 if (final_start >= final_end)
3722 continue;
3723
3724 addr = memblock_find_in_range(final_start, final_end, size, align);
3725
3726 if (addr == MEMBLOCK_ERROR)
3727 continue;
3728
3729 return addr;
3730 }
3731
3732 return MEMBLOCK_ERROR;
3733}
3734#endif
3735
3736int __init add_from_early_node_map(struct range *range, int az,
3737 int nr_range, int nid)
3738{
3739 int i;
3740 u64 start, end;
3741
3742
3743 for_each_active_range_index_in_nid(i, nid) {
3744 start = early_node_map[i].start_pfn;
3745 end = early_node_map[i].end_pfn;
3746 nr_range = add_range(range, az, nr_range, start, end);
3747 }
3748 return nr_range;
3749}
3750
3751#ifdef CONFIG_NO_BOOTMEM
3752void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3753 u64 goal, u64 limit)
3754{
3755 void *ptr;
3756 u64 addr;
3757
3758 if (limit > memblock.current_limit)
3759 limit = memblock.current_limit;
3760
3761 addr = find_memory_core_early(nid, size, align, goal, limit);
3762
3763 if (addr == MEMBLOCK_ERROR)
3764 return NULL;
3765
3766 ptr = phys_to_virt(addr);
3767 memset(ptr, 0, size);
3768 memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
3769
3770
3771
3772
3773 kmemleak_alloc(ptr, size, 0, 0);
3774 return ptr;
3775}
3776#endif
3777
3778
3779void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3780{
3781 int i;
3782 int ret;
3783
3784 for_each_active_range_index_in_nid(i, nid) {
3785 ret = work_fn(early_node_map[i].start_pfn,
3786 early_node_map[i].end_pfn, data);
3787 if (ret)
3788 break;
3789 }
3790}
3791
3792
3793
3794
3795
3796
3797
3798
3799void __init sparse_memory_present_with_active_regions(int nid)
3800{
3801 int i;
3802
3803 for_each_active_range_index_in_nid(i, nid)
3804 memory_present(early_node_map[i].nid,
3805 early_node_map[i].start_pfn,
3806 early_node_map[i].end_pfn);
3807}
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820void __meminit get_pfn_range_for_nid(unsigned int nid,
3821 unsigned long *start_pfn, unsigned long *end_pfn)
3822{
3823 int i;
3824 *start_pfn = -1UL;
3825 *end_pfn = 0;
3826
3827 for_each_active_range_index_in_nid(i, nid) {
3828 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3829 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3830 }
3831
3832 if (*start_pfn == -1UL)
3833 *start_pfn = 0;
3834}
3835
3836
3837
3838
3839
3840
3841static void __init find_usable_zone_for_movable(void)
3842{
3843 int zone_index;
3844 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3845 if (zone_index == ZONE_MOVABLE)
3846 continue;
3847
3848 if (arch_zone_highest_possible_pfn[zone_index] >
3849 arch_zone_lowest_possible_pfn[zone_index])
3850 break;
3851 }
3852
3853 VM_BUG_ON(zone_index == -1);
3854 movable_zone = zone_index;
3855}
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
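/*
 * Descriptive comment (added): the zone ranges reported by the
 * architecture do not include ZONE_MOVABLE, which is carved out of the
 * highest populated zone.  Clamp [*zone_start_pfn, *zone_end_pfn) so
 * pages handed to ZONE_MOVABLE (everything above zone_movable_pfn[nid])
 * are not also counted in the zone they were taken from.
 */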
3867static void __meminit adjust_zone_range_for_zone_movable(int nid,
3868 unsigned long zone_type,
3869 unsigned long node_start_pfn,
3870 unsigned long node_end_pfn,
3871 unsigned long *zone_start_pfn,
3872 unsigned long *zone_end_pfn)
3873{
3874
3875 if (zone_movable_pfn[nid]) {
3876
3877 if (zone_type == ZONE_MOVABLE) {
3878 *zone_start_pfn = zone_movable_pfn[nid];
3879 *zone_end_pfn = min(node_end_pfn,
3880 arch_zone_highest_possible_pfn[movable_zone]);
3881
3882
3883 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3884 *zone_end_pfn > zone_movable_pfn[nid]) {
3885 *zone_end_pfn = zone_movable_pfn[nid];
3886
3887
3888 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3889 *zone_start_pfn = *zone_end_pfn;
3890 }
3891}
3892
3893
3894
3895
3896
3897static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3898 unsigned long zone_type,
3899 unsigned long *ignored)
3900{
3901 unsigned long node_start_pfn, node_end_pfn;
3902 unsigned long zone_start_pfn, zone_end_pfn;
3903
3904
3905 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3906 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3907 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3908 adjust_zone_range_for_zone_movable(nid, zone_type,
3909 node_start_pfn, node_end_pfn,
3910 &zone_start_pfn, &zone_end_pfn);
3911
3912
3913 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3914 return 0;
3915
3916
3917 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3918 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3919
3920
3921 return zone_end_pfn - zone_start_pfn;
3922}
3923
3924
3925
3926
3927
3928unsigned long __meminit __absent_pages_in_range(int nid,
3929 unsigned long range_start_pfn,
3930 unsigned long range_end_pfn)
3931{
3932 int i = 0;
3933 unsigned long prev_end_pfn = 0, hole_pages = 0;
3934 unsigned long start_pfn;
3935
3936
3937 i = first_active_region_index_in_nid(nid);
3938 if (i == -1)
3939 return 0;
3940
3941 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3942
3943
3944 if (early_node_map[i].start_pfn > range_start_pfn)
3945 hole_pages = prev_end_pfn - range_start_pfn;
3946
3947
3948 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3949
3950
3951 if (prev_end_pfn >= range_end_pfn)
3952 break;
3953
3954
3955 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3956 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3957
3958
3959 if (start_pfn > range_start_pfn) {
3960 BUG_ON(prev_end_pfn > start_pfn);
3961 hole_pages += start_pfn - prev_end_pfn;
3962 }
3963 prev_end_pfn = early_node_map[i].end_pfn;
3964 }
3965
3966
3967 if (range_end_pfn > prev_end_pfn)
3968 hole_pages += range_end_pfn -
3969 max(range_start_pfn, prev_end_pfn);
3970
3971 return hole_pages;
3972}
3973
3974
3975
3976
3977
3978
3979
3980
3981unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3982 unsigned long end_pfn)
3983{
3984 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3985}
3986
3987
3988static unsigned long __meminit zone_absent_pages_in_node(int nid,
3989 unsigned long zone_type,
3990 unsigned long *ignored)
3991{
3992 unsigned long node_start_pfn, node_end_pfn;
3993 unsigned long zone_start_pfn, zone_end_pfn;
3994
3995 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3996 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3997 node_start_pfn);
3998 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3999 node_end_pfn);
4000
4001 adjust_zone_range_for_zone_movable(nid, zone_type,
4002 node_start_pfn, node_end_pfn,
4003 &zone_start_pfn, &zone_end_pfn);
4004 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4005}
4006
4007#else
4008static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4009 unsigned long zone_type,
4010 unsigned long *zones_size)
4011{
4012 return zones_size[zone_type];
4013}
4014
4015static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4016 unsigned long zone_type,
4017 unsigned long *zholes_size)
4018{
4019 if (!zholes_size)
4020 return 0;
4021
4022 return zholes_size[zone_type];
4023}
4024
4025#endif
4026
4027static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4028 unsigned long *zones_size, unsigned long *zholes_size)
4029{
4030 unsigned long realtotalpages, totalpages = 0;
4031 enum zone_type i;
4032
4033 for (i = 0; i < MAX_NR_ZONES; i++)
4034 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4035 zones_size);
4036 pgdat->node_spanned_pages = totalpages;
4037
4038 realtotalpages = totalpages;
4039 for (i = 0; i < MAX_NR_ZONES; i++)
4040 realtotalpages -=
4041 zone_absent_pages_in_node(pgdat->node_id, i,
4042 zholes_size);
4043 pgdat->node_present_pages = realtotalpages;
4044 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4045 realtotalpages);
4046}
4047
4048#ifndef CONFIG_SPARSEMEM
4049
4050
4051
4052
4053
4054
4055
4056static unsigned long __init usemap_size(unsigned long zonesize)
4057{
4058 unsigned long usemapsize;
4059
4060 usemapsize = roundup(zonesize, pageblock_nr_pages);
4061 usemapsize = usemapsize >> pageblock_order;
4062 usemapsize *= NR_PAGEBLOCK_BITS;
4063 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4064
4065 return usemapsize / 8;
4066}
4067
4068static void __init setup_usemap(struct pglist_data *pgdat,
4069 struct zone *zone, unsigned long zonesize)
4070{
4071 unsigned long usemapsize = usemap_size(zonesize);
4072 zone->pageblock_flags = NULL;
4073 if (usemapsize)
4074 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
4075}
4076#else
4077static inline void setup_usemap(struct pglist_data *pgdat,
4078 struct zone *zone, unsigned long zonesize) {}
4079#endif
4080
4081#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4082
4083
4084static inline int pageblock_default_order(void)
4085{
4086 if (HPAGE_SHIFT > PAGE_SHIFT)
4087 return HUGETLB_PAGE_ORDER;
4088
4089 return MAX_ORDER-1;
4090}
4091
4092
4093static inline void __init set_pageblock_order(unsigned int order)
4094{
4095
4096 if (pageblock_order)
4097 return;
4098
4099
4100
4101
4102
4103 pageblock_order = order;
4104}
4105#else
4106
4107
4108
4109
4110
4111
4112
4113static inline int pageblock_default_order(unsigned int order)
4114{
4115 return MAX_ORDER-1;
4116}
4117#define set_pageblock_order(x) do {} while (0)
4118
4119#endif
4120
4121
4122
4123
4124
4125
4126
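/*
 * Descriptive comment (added): set up one node's zone data structures:
 * spanned/present sizes (minus memmap and dma_reserve overhead), locks,
 * LRU lists, the boot-time per-cpu pageset, the pageblock bitmap, the
 * wait table, and finally the memmap of each populated zone.
 */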
4127static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4128 unsigned long *zones_size, unsigned long *zholes_size)
4129{
4130 enum zone_type j;
4131 int nid = pgdat->node_id;
4132 unsigned long zone_start_pfn = pgdat->node_start_pfn;
4133 int ret;
4134
4135 pgdat_resize_init(pgdat);
4136 pgdat->nr_zones = 0;
4137 init_waitqueue_head(&pgdat->kswapd_wait);
4138 pgdat->kswapd_max_order = 0;
4139 pgdat_page_cgroup_init(pgdat);
4140
4141 for (j = 0; j < MAX_NR_ZONES; j++) {
4142 struct zone *zone = pgdat->node_zones + j;
4143 unsigned long size, realsize, memmap_pages;
4144 enum lru_list l;
4145
4146 size = zone_spanned_pages_in_node(nid, j, zones_size);
4147 realsize = size - zone_absent_pages_in_node(nid, j,
4148 zholes_size);
4149
4150
4151
4152
4153
4154
4155 memmap_pages =
4156 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4157 if (realsize >= memmap_pages) {
4158 realsize -= memmap_pages;
4159 if (memmap_pages)
4160 printk(KERN_DEBUG
4161 " %s zone: %lu pages used for memmap\n",
4162 zone_names[j], memmap_pages);
4163 } else
4164 printk(KERN_WARNING
4165 " %s zone: %lu pages exceeds realsize %lu\n",
4166 zone_names[j], memmap_pages, realsize);
4167
4168
4169 if (j == 0 && realsize > dma_reserve) {
4170 realsize -= dma_reserve;
4171 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
4172 zone_names[0], dma_reserve);
4173 }
4174
4175 if (!is_highmem_idx(j))
4176 nr_kernel_pages += realsize;
4177 nr_all_pages += realsize;
4178
4179 zone->spanned_pages = size;
4180 zone->present_pages = realsize;
4181#ifdef CONFIG_NUMA
4182 zone->node = nid;
4183 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4184 / 100;
4185 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4186#endif
4187 zone->name = zone_names[j];
4188 spin_lock_init(&zone->lock);
4189 spin_lock_init(&zone->lru_lock);
4190 zone_seqlock_init(zone);
4191 zone->zone_pgdat = pgdat;
4192
4193 zone_pcp_init(zone);
4194 for_each_lru(l) {
4195 INIT_LIST_HEAD(&zone->lru[l].list);
4196 zone->reclaim_stat.nr_saved_scan[l] = 0;
4197 }
4198 zone->reclaim_stat.recent_rotated[0] = 0;
4199 zone->reclaim_stat.recent_rotated[1] = 0;
4200 zone->reclaim_stat.recent_scanned[0] = 0;
4201 zone->reclaim_stat.recent_scanned[1] = 0;
4202 zap_zone_vm_stats(zone);
4203 zone->flags = 0;
4204 if (!size)
4205 continue;
4206
4207 set_pageblock_order(pageblock_default_order());
4208 setup_usemap(pgdat, zone, size);
4209 ret = init_currently_empty_zone(zone, zone_start_pfn,
4210 size, MEMMAP_EARLY);
4211 BUG_ON(ret);
4212 memmap_init(size, nid, j, zone_start_pfn);
4213 zone_start_pfn += size;
4214 }
4215}
4216
4217static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4218{
4219
4220 if (!pgdat->node_spanned_pages)
4221 return;
4222
4223#ifdef CONFIG_FLAT_NODE_MEM_MAP
4224
4225 if (!pgdat->node_mem_map) {
4226 unsigned long size, start, end;
4227 struct page *map;
4228
4229
4230
4231
4232
4233
4234 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4235 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4236 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4237 size = (end - start) * sizeof(struct page);
4238 map = alloc_remap(pgdat->node_id, size);
4239 if (!map)
4240 map = alloc_bootmem_node(pgdat, size);
4241 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4242 }
4243#ifndef CONFIG_NEED_MULTIPLE_NODES
4244
4245
4246
4247 if (pgdat == NODE_DATA(0)) {
4248 mem_map = NODE_DATA(0)->node_mem_map;
4249#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4250 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4251 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4252#endif
4253 }
4254#endif
4255#endif
4256}
4257
4258void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4259 unsigned long node_start_pfn, unsigned long *zholes_size)
4260{
4261 pg_data_t *pgdat = NODE_DATA(nid);
4262
4263 pgdat->node_id = nid;
4264 pgdat->node_start_pfn = node_start_pfn;
4265 calculate_node_totalpages(pgdat, zones_size, zholes_size);
4266
4267 alloc_node_mem_map(pgdat);
4268#ifdef CONFIG_FLAT_NODE_MEM_MAP
4269 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4270 nid, (unsigned long)pgdat,
4271 (unsigned long)pgdat->node_mem_map);
4272#endif
4273
4274 free_area_init_core(pgdat, zones_size, zholes_size);
4275}
4276
4277#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4278
4279#if MAX_NUMNODES > 1
4280
4281
4282
4283static void __init setup_nr_node_ids(void)
4284{
4285 unsigned int node;
4286 unsigned int highest = 0;
4287
4288 for_each_node_mask(node, node_possible_map)
4289 highest = node;
4290 nr_node_ids = highest + 1;
4291}
4292#else
4293static inline void setup_nr_node_ids(void)
4294{
4295}
4296#endif
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
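/*
 * Descriptive comment (added): register a PFN range backed by memory in
 * early_node_map[].  Ranges that overlap or touch an existing entry for
 * the same node are merged into it; otherwise a new entry is appended,
 * up to MAX_ACTIVE_REGIONS entries (excess ranges are dropped with a
 * warning).
 */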
4310void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4311 unsigned long end_pfn)
4312{
4313 int i;
4314
4315 mminit_dprintk(MMINIT_TRACE, "memory_register",
4316 "Entering add_active_range(%d, %#lx, %#lx) "
4317 "%d entries of %d used\n",
4318 nid, start_pfn, end_pfn,
4319 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4320
4321 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4322
4323
4324 for (i = 0; i < nr_nodemap_entries; i++) {
4325 if (early_node_map[i].nid != nid)
4326 continue;
4327
4328
4329 if (start_pfn >= early_node_map[i].start_pfn &&
4330 end_pfn <= early_node_map[i].end_pfn)
4331 return;
4332
4333
4334 if (start_pfn <= early_node_map[i].end_pfn &&
4335 end_pfn > early_node_map[i].end_pfn) {
4336 early_node_map[i].end_pfn = end_pfn;
4337 return;
4338 }
4339
4340
4341 if (start_pfn < early_node_map[i].start_pfn &&
4342 end_pfn >= early_node_map[i].start_pfn) {
4343 early_node_map[i].start_pfn = start_pfn;
4344 return;
4345 }
4346 }
4347
4348
4349 if (i >= MAX_ACTIVE_REGIONS) {
4350 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4351 MAX_ACTIVE_REGIONS);
4352 return;
4353 }
4354
4355 early_node_map[i].nid = nid;
4356 early_node_map[i].start_pfn = start_pfn;
4357 early_node_map[i].end_pfn = end_pfn;
4358 nr_nodemap_entries = i + 1;
4359}
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4373 unsigned long end_pfn)
4374{
4375 int i, j;
4376 int removed = 0;
4377
4378 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4379 nid, start_pfn, end_pfn);
4380
4381
4382 for_each_active_range_index_in_nid(i, nid) {
4383 if (early_node_map[i].start_pfn >= start_pfn &&
4384 early_node_map[i].end_pfn <= end_pfn) {
4385
4386 early_node_map[i].start_pfn = 0;
4387 early_node_map[i].end_pfn = 0;
4388 removed = 1;
4389 continue;
4390 }
4391 if (early_node_map[i].start_pfn < start_pfn &&
4392 early_node_map[i].end_pfn > start_pfn) {
4393 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4394 early_node_map[i].end_pfn = start_pfn;
4395 if (temp_end_pfn > end_pfn)
4396 add_active_range(nid, end_pfn, temp_end_pfn);
4397 continue;
4398 }
4399 if (early_node_map[i].start_pfn >= start_pfn &&
4400 early_node_map[i].end_pfn > end_pfn &&
4401 early_node_map[i].start_pfn < end_pfn) {
4402 early_node_map[i].start_pfn = end_pfn;
4403 continue;
4404 }
4405 }
4406
4407 if (!removed)
4408 return;
4409
4410
4411 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4412 if (early_node_map[i].nid != nid)
4413 continue;
4414 if (early_node_map[i].end_pfn)
4415 continue;
4416
4417 for (j = i; j < nr_nodemap_entries - 1; j++)
4418 memcpy(&early_node_map[j], &early_node_map[j+1],
4419 sizeof(early_node_map[j]));
4420 j = nr_nodemap_entries - 1;
4421 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4422 nr_nodemap_entries--;
4423 }
4424}
4425
4426
4427
4428
4429
4430
4431
4432
4433void __init remove_all_active_ranges(void)
4434{
4435 memset(early_node_map, 0, sizeof(early_node_map));
4436 nr_nodemap_entries = 0;
4437}
4438
4439
4440static int __init cmp_node_active_region(const void *a, const void *b)
4441{
4442 struct node_active_region *arange = (struct node_active_region *)a;
4443 struct node_active_region *brange = (struct node_active_region *)b;
4444
4445
4446 if (arange->start_pfn > brange->start_pfn)
4447 return 1;
4448 if (arange->start_pfn < brange->start_pfn)
4449 return -1;
4450
4451 return 0;
4452}
4453
4454
4455void __init sort_node_map(void)
4456{
4457 sort(early_node_map, (size_t)nr_nodemap_entries,
4458 sizeof(struct node_active_region),
4459 cmp_node_active_region, NULL);
4460}
4461
4462
4463static unsigned long __init find_min_pfn_for_node(int nid)
4464{
4465 int i;
4466 unsigned long min_pfn = ULONG_MAX;
4467
4468
4469 for_each_active_range_index_in_nid(i, nid)
4470 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4471
4472 if (min_pfn == ULONG_MAX) {
4473 printk(KERN_WARNING
4474 "Could not find start_pfn for node %d\n", nid);
4475 return 0;
4476 }
4477
4478 return min_pfn;
4479}
4480
4481
4482
4483
4484
4485
4486
4487unsigned long __init find_min_pfn_with_active_regions(void)
4488{
4489 return find_min_pfn_for_node(MAX_NUMNODES);
4490}
4491
4492
4493
4494
4495
4496
4497static unsigned long __init early_calculate_totalpages(void)
4498{
4499 int i;
4500 unsigned long totalpages = 0;
4501
4502 for (i = 0; i < nr_nodemap_entries; i++) {
4503 unsigned long pages = early_node_map[i].end_pfn -
4504 early_node_map[i].start_pfn;
4505 totalpages += pages;
4506 if (pages)
4507 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4508 }
4509 return totalpages;
4510}
4511
4512
4513
4514
4515
4516
4517
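/*
 * Descriptive comment (added): work out, for each node, the first PFN
 * that belongs to ZONE_MOVABLE, honouring the kernelcore= and
 * movablecore= boot options.  The requested amount of kernel-core
 * memory is spread as evenly as possible over the nodes that have
 * memory; whatever lies above zone_movable_pfn[nid] on each node
 * becomes ZONE_MOVABLE.
 */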
4518static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4519{
4520 int i, nid;
4521 unsigned long usable_startpfn;
4522 unsigned long kernelcore_node, kernelcore_remaining;
4523
4524 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4525 unsigned long totalpages = early_calculate_totalpages();
4526 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536 if (required_movablecore) {
4537 unsigned long corepages;
4538
4539
4540
4541
4542
4543 required_movablecore =
4544 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4545 corepages = totalpages - required_movablecore;
4546
4547 required_kernelcore = max(required_kernelcore, corepages);
4548 }
4549
4550
4551 if (!required_kernelcore)
4552 goto out;
4553
4554
4555 find_usable_zone_for_movable();
4556 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4557
4558restart:
4559
4560 kernelcore_node = required_kernelcore / usable_nodes;
4561 for_each_node_state(nid, N_HIGH_MEMORY) {
4562
4563
4564
4565
4566
4567 if (required_kernelcore < kernelcore_node)
4568 kernelcore_node = required_kernelcore / usable_nodes;
4569
4570
4571
4572
4573
4574
4575 kernelcore_remaining = kernelcore_node;
4576
4577
4578 for_each_active_range_index_in_nid(i, nid) {
4579 unsigned long start_pfn, end_pfn;
4580 unsigned long size_pages;
4581
4582 start_pfn = max(early_node_map[i].start_pfn,
4583 zone_movable_pfn[nid]);
4584 end_pfn = early_node_map[i].end_pfn;
4585 if (start_pfn >= end_pfn)
4586 continue;
4587
4588
4589 if (start_pfn < usable_startpfn) {
4590 unsigned long kernel_pages;
4591 kernel_pages = min(end_pfn, usable_startpfn)
4592 - start_pfn;
4593
4594 kernelcore_remaining -= min(kernel_pages,
4595 kernelcore_remaining);
4596 required_kernelcore -= min(kernel_pages,
4597 required_kernelcore);
4598
4599
4600 if (end_pfn <= usable_startpfn) {
4601
4602
4603
4604
4605
4606
4607
4608 zone_movable_pfn[nid] = end_pfn;
4609 continue;
4610 }
4611 start_pfn = usable_startpfn;
4612 }
4613
4614
4615
4616
4617
4618
4619 size_pages = end_pfn - start_pfn;
4620 if (size_pages > kernelcore_remaining)
4621 size_pages = kernelcore_remaining;
4622 zone_movable_pfn[nid] = start_pfn + size_pages;
4623
4624
4625
4626
4627
4628
4629 required_kernelcore -= min(required_kernelcore,
4630 size_pages);
4631 kernelcore_remaining -= size_pages;
4632 if (!kernelcore_remaining)
4633 break;
4634 }
4635 }
4636
4637
4638
4639
4640
4641
4642
4643 usable_nodes--;
4644 if (usable_nodes && required_kernelcore > usable_nodes)
4645 goto restart;
4646
4647
4648 for (nid = 0; nid < MAX_NUMNODES; nid++)
4649 zone_movable_pfn[nid] =
4650 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4651
4652out:
4653
4654 node_states[N_HIGH_MEMORY] = saved_node_state;
4655}
4656
4657
4658static void check_for_regular_memory(pg_data_t *pgdat)
4659{
4660#ifdef CONFIG_HIGHMEM
4661 enum zone_type zone_type;
4662
4663 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4664 struct zone *zone = &pgdat->node_zones[zone_type];
4665 if (zone->present_pages)
4666 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4667 }
4668#endif
4669}
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4685{
4686 unsigned long nid;
4687 int i;
4688
4689
4690 sort_node_map();
4691
4692
4693 memset(arch_zone_lowest_possible_pfn, 0,
4694 sizeof(arch_zone_lowest_possible_pfn));
4695 memset(arch_zone_highest_possible_pfn, 0,
4696 sizeof(arch_zone_highest_possible_pfn));
4697 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4698 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4699 for (i = 1; i < MAX_NR_ZONES; i++) {
4700 if (i == ZONE_MOVABLE)
4701 continue;
4702 arch_zone_lowest_possible_pfn[i] =
4703 arch_zone_highest_possible_pfn[i-1];
4704 arch_zone_highest_possible_pfn[i] =
4705 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4706 }
4707 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4708 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4709
4710
4711 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4712 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4713
4714
4715 printk("Zone PFN ranges:\n");
4716 for (i = 0; i < MAX_NR_ZONES; i++) {
4717 if (i == ZONE_MOVABLE)
4718 continue;
4719 printk(" %-8s ", zone_names[i]);
4720 if (arch_zone_lowest_possible_pfn[i] ==
4721 arch_zone_highest_possible_pfn[i])
4722 printk("empty\n");
4723 else
4724 printk("%0#10lx -> %0#10lx\n",
4725 arch_zone_lowest_possible_pfn[i],
4726 arch_zone_highest_possible_pfn[i]);
4727 }
4728
4729
4730 printk("Movable zone start PFN for each node\n");
4731 for (i = 0; i < MAX_NUMNODES; i++) {
4732 if (zone_movable_pfn[i])
4733 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4734 }
4735
4736
4737 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4738 for (i = 0; i < nr_nodemap_entries; i++)
4739 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4740 early_node_map[i].start_pfn,
4741 early_node_map[i].end_pfn);
4742
4743
4744 mminit_verify_pageflags_layout();
4745 setup_nr_node_ids();
4746 for_each_online_node(nid) {
4747 pg_data_t *pgdat = NODE_DATA(nid);
4748 free_area_init_node(nid, NULL,
4749 find_min_pfn_for_node(nid), NULL);
4750
4751
4752 if (pgdat->node_present_pages)
4753 node_set_state(nid, N_HIGH_MEMORY);
4754 check_for_regular_memory(pgdat);
4755 }
4756}
4757
4758static int __init cmdline_parse_core(char *p, unsigned long *core)
4759{
4760 unsigned long long coremem;
4761 if (!p)
4762 return -EINVAL;
4763
4764 coremem = memparse(p, &p);
4765 *core = coremem >> PAGE_SHIFT;
4766
4767
4768 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4769
4770 return 0;
4771}
4772
4773
4774
4775
4776
4777static int __init cmdline_parse_kernelcore(char *p)
4778{
4779 return cmdline_parse_core(p, &required_kernelcore);
4780}
4781
4782
4783
4784
4785
4786static int __init cmdline_parse_movablecore(char *p)
4787{
4788 return cmdline_parse_core(p, &required_movablecore);
4789}
4790
4791early_param("kernelcore", cmdline_parse_kernelcore);
4792early_param("movablecore", cmdline_parse_movablecore);
4793
4794#endif
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807void __init set_dma_reserve(unsigned long new_dma_reserve)
4808{
4809 dma_reserve = new_dma_reserve;
4810}
4811
4812#ifndef CONFIG_NEED_MULTIPLE_NODES
4813struct pglist_data __refdata contig_page_data = {
4814#ifndef CONFIG_NO_BOOTMEM
4815 .bdata = &bootmem_node_data[0]
4816#endif
4817 };
4818EXPORT_SYMBOL(contig_page_data);
4819#endif
4820
4821void __init free_area_init(unsigned long *zones_size)
4822{
4823 free_area_init_node(0, zones_size,
4824 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4825}
4826
4827static int page_alloc_cpu_notify(struct notifier_block *self,
4828 unsigned long action, void *hcpu)
4829{
4830 int cpu = (unsigned long)hcpu;
4831
4832 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4833 drain_pages(cpu);
4834
4835
4836
4837
4838
4839
4840
4841 vm_events_fold_cpu(cpu);
4842
4843
4844
4845
4846
4847
4848
4849
4850 refresh_cpu_vm_stats(cpu);
4851 }
4852 return NOTIFY_OK;
4853}
4854
4855void __init page_alloc_init(void)
4856{
4857 hotcpu_notifier(page_alloc_cpu_notify, 0);
4858}
4859
4860
4861
4862
4863
4864static void calculate_totalreserve_pages(void)
4865{
4866 struct pglist_data *pgdat;
4867 unsigned long reserve_pages = 0;
4868 enum zone_type i, j;
4869
4870 for_each_online_pgdat(pgdat) {
4871 for (i = 0; i < MAX_NR_ZONES; i++) {
4872 struct zone *zone = pgdat->node_zones + i;
4873 unsigned long max = 0;
4874
4875
4876 for (j = i; j < MAX_NR_ZONES; j++) {
4877 if (zone->lowmem_reserve[j] > max)
4878 max = zone->lowmem_reserve[j];
4879 }
4880
4881
4882 max += high_wmark_pages(zone);
4883
4884 if (max > zone->present_pages)
4885 max = zone->present_pages;
4886 reserve_pages += max;
4887 }
4888 }
4889 totalreserve_pages = reserve_pages;
4890}
4891
4892
4893
4894
4895
4896
4897
4898static void setup_per_zone_lowmem_reserve(void)
4899{
4900 struct pglist_data *pgdat;
4901 enum zone_type j, idx;
4902
4903 for_each_online_pgdat(pgdat) {
4904 for (j = 0; j < MAX_NR_ZONES; j++) {
4905 struct zone *zone = pgdat->node_zones + j;
4906 unsigned long present_pages = zone->present_pages;
4907
4908 zone->lowmem_reserve[j] = 0;
4909
4910 idx = j;
4911 while (idx) {
4912 struct zone *lower_zone;
4913
4914 idx--;
4915
4916 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4917 sysctl_lowmem_reserve_ratio[idx] = 1;
4918
4919 lower_zone = pgdat->node_zones + idx;
4920 lower_zone->lowmem_reserve[j] = present_pages /
4921 sysctl_lowmem_reserve_ratio[idx];
4922 present_pages += lower_zone->present_pages;
4923 }
4924 }
4925 }
4926
4927
4928 calculate_totalreserve_pages();
4929}
4930
4931
4932
4933
4934
4935
4936
4937
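/*
 * Descriptive comment (added): recalculate every zone's watermarks from
 * min_free_kbytes.  Each lowmem zone gets a share proportional to its
 * size; highmem zones only get a small WMARK_MIN (SWAP_CLUSTER_MAX..128
 * pages) because kernel allocations cannot use highmem.  WMARK_LOW and
 * WMARK_HIGH are the min watermark plus a quarter and a half of the
 * zone's proportional share, respectively.
 */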
4938void setup_per_zone_wmarks(void)
4939{
4940 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4941 unsigned long lowmem_pages = 0;
4942 struct zone *zone;
4943 unsigned long flags;
4944
4945
4946 for_each_zone(zone) {
4947 if (!is_highmem(zone))
4948 lowmem_pages += zone->present_pages;
4949 }
4950
4951 for_each_zone(zone) {
4952 u64 tmp;
4953
4954 spin_lock_irqsave(&zone->lock, flags);
4955 tmp = (u64)pages_min * zone->present_pages;
4956 do_div(tmp, lowmem_pages);
4957 if (is_highmem(zone)) {
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967 int min_pages;
4968
4969 min_pages = zone->present_pages / 1024;
4970 if (min_pages < SWAP_CLUSTER_MAX)
4971 min_pages = SWAP_CLUSTER_MAX;
4972 if (min_pages > 128)
4973 min_pages = 128;
4974 zone->watermark[WMARK_MIN] = min_pages;
4975 } else {
4976
4977
4978
4979
4980 zone->watermark[WMARK_MIN] = tmp;
4981 }
4982
4983 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
4984 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4985 setup_zone_migrate_reserve(zone);
4986 spin_unlock_irqrestore(&zone->lock, flags);
4987 }
4988
4989
4990 calculate_totalreserve_pages();
4991}
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014void calculate_zone_inactive_ratio(struct zone *zone)
5015{
5016 unsigned int gb, ratio;
5017
5018
5019 gb = zone->present_pages >> (30 - PAGE_SHIFT);
5020 if (gb)
5021 ratio = int_sqrt(10 * gb);
5022 else
5023 ratio = 1;
5024
5025 zone->inactive_ratio = ratio;
5026}
5027
5028static void __init setup_per_zone_inactive_ratio(void)
5029{
5030 struct zone *zone;
5031
5032 for_each_zone(zone)
5033 calculate_zone_inactive_ratio(zone);
5034}
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060static int __init init_per_zone_wmark_min(void)
5061{
5062 unsigned long lowmem_kbytes;
5063
5064 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5065
5066 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5067 if (min_free_kbytes < 128)
5068 min_free_kbytes = 128;
5069 if (min_free_kbytes > 65536)
5070 min_free_kbytes = 65536;
5071 setup_per_zone_wmarks();
5072 setup_per_zone_lowmem_reserve();
5073 setup_per_zone_inactive_ratio();
5074 return 0;
5075}
5076module_init(init_per_zone_wmark_min)
5077
5078
5079
5080
5081
5082
5083int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5084 void __user *buffer, size_t *length, loff_t *ppos)
5085{
5086 proc_dointvec(table, write, buffer, length, ppos);
5087 if (write)
5088 setup_per_zone_wmarks();
5089 return 0;
5090}
5091
5092#ifdef CONFIG_NUMA
5093int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5094 void __user *buffer, size_t *length, loff_t *ppos)
5095{
5096 struct zone *zone;
5097 int rc;
5098
5099 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5100 if (rc)
5101 return rc;
5102
5103 for_each_zone(zone)
5104 zone->min_unmapped_pages = (zone->present_pages *
5105 sysctl_min_unmapped_ratio) / 100;
5106 return 0;
5107}
5108
5109int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5110 void __user *buffer, size_t *length, loff_t *ppos)
5111{
5112 struct zone *zone;
5113 int rc;
5114
5115 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5116 if (rc)
5117 return rc;
5118
5119 for_each_zone(zone)
5120 zone->min_slab_pages = (zone->present_pages *
5121 sysctl_min_slab_ratio) / 100;
5122 return 0;
5123}
5124#endif
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5136 void __user *buffer, size_t *length, loff_t *ppos)
5137{
5138 proc_dointvec_minmax(table, write, buffer, length, ppos);
5139 setup_per_zone_lowmem_reserve();
5140 return 0;
5141}
5142
5143
5144
5145
5146
5147
5148
5149int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5150 void __user *buffer, size_t *length, loff_t *ppos)
5151{
5152 struct zone *zone;
5153 unsigned int cpu;
5154 int ret;
5155
5156 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5157 if (!write || (ret == -EINVAL))
5158 return ret;
5159 for_each_populated_zone(zone) {
5160 for_each_possible_cpu(cpu) {
5161 unsigned long high;
5162 high = zone->present_pages / percpu_pagelist_fraction;
5163 setup_pagelist_highmark(
5164 per_cpu_ptr(zone->pageset, cpu), high);
5165 }
5166 }
5167 return 0;
5168}
5169
5170int hashdist = HASHDIST_DEFAULT;
5171
5172#ifdef CONFIG_NUMA
5173static int __init set_hashdist(char *str)
5174{
5175 if (!str)
5176 return 0;
5177 hashdist = simple_strtoul(str, &str, 0);
5178 return 1;
5179}
5180__setup("hashdist=", set_hashdist);
5181#endif
5182
5183
5184
5185
5186
5187
5188
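/*
 * Descriptive comment (added): allocate a large boot-time system hash
 * table (e.g. the inode or dentry hash).  If @numentries is 0 it is
 * derived from the amount of kernel memory, scaled by @scale and rounded
 * to a power of two.  The table comes from bootmem, vmalloc (when
 * hashdist is set) or the buddy allocator, halving the size until an
 * allocation succeeds.
 */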
5189void *__init alloc_large_system_hash(const char *tablename,
5190 unsigned long bucketsize,
5191 unsigned long numentries,
5192 int scale,
5193 int flags,
5194 unsigned int *_hash_shift,
5195 unsigned int *_hash_mask,
5196 unsigned long limit)
5197{
5198 unsigned long long max = limit;
5199 unsigned long log2qty, size;
5200 void *table = NULL;
5201
5202
5203 if (!numentries) {
5204
5205 numentries = nr_kernel_pages;
5206 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5207 numentries >>= 20 - PAGE_SHIFT;
5208 numentries <<= 20 - PAGE_SHIFT;
5209
5210
5211 if (scale > PAGE_SHIFT)
5212 numentries >>= (scale - PAGE_SHIFT);
5213 else
5214 numentries <<= (PAGE_SHIFT - scale);
5215
5216
5217 if (unlikely(flags & HASH_SMALL)) {
5218
5219 WARN_ON(!(flags & HASH_EARLY));
5220 if (!(numentries >> *_hash_shift)) {
5221 numentries = 1UL << *_hash_shift;
5222 BUG_ON(!numentries);
5223 }
5224 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5225 numentries = PAGE_SIZE / bucketsize;
5226 }
5227 numentries = roundup_pow_of_two(numentries);
5228
5229
5230 if (max == 0) {
5231 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5232 do_div(max, bucketsize);
5233 }
5234
5235 if (numentries > max)
5236 numentries = max;
5237
5238 log2qty = ilog2(numentries);
5239
5240 do {
5241 size = bucketsize << log2qty;
5242 if (flags & HASH_EARLY)
5243 table = alloc_bootmem_nopanic(size);
5244 else if (hashdist)
5245 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5246 else {
5247
5248
5249
5250
5251
5252 if (get_order(size) < MAX_ORDER) {
5253 table = alloc_pages_exact(size, GFP_ATOMIC);
5254 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5255 }
5256 }
5257 } while (!table && size > PAGE_SIZE && --log2qty);
5258
5259 if (!table)
5260 panic("Failed to allocate %s hash table\n", tablename);
5261
5262 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5263 tablename,
5264 (1UL << log2qty),
5265 ilog2(size) - PAGE_SHIFT,
5266 size);
5267
5268 if (_hash_shift)
5269 *_hash_shift = log2qty;
5270 if (_hash_mask)
5271 *_hash_mask = (1 << log2qty) - 1;
5272
5273 return table;
5274}
5275
5276
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif
}

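/**
 * get_pageblock_flags_group - Return the requested group of flags for the
 *	pageblock_nr_pages block of pages containing @page
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 *
 * Returns the pageblock_bits flags as an unsigned long.
 */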
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

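/**
 * set_pageblock_flags_group - Set the requested group of flags for a
 *	pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 */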
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}

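/*
 * Check whether the pageblock containing @page consists only of free or
 * movable pages.  Up to @count pages that are neither free nor on the LRU
 * are tolerated.  The PageLRU check is done without isolation or the
 * lru_lock, so the result is a heuristic rather than an exact answer.
 */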
static bool
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
	unsigned long pfn, iter, found;

	/*
	 * ZONE_MOVABLE and MIGRATE_MOVABLE pageblocks are assumed to hold
	 * only movable pages, so they can be reported immediately.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return true;

	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
		return true;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);
		/* a free buddy page lets us skip the whole buddy block */
		if (!page_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}
		/*
		 * A page with a non-zero refcount that is not on the LRU is
		 * counted as immobile; this also covers reclaimable kernel
		 * pages and PG_reserved pages used since boot.
		 */
		if (!PageLRU(page))
			found++;

		if (found > count)
			return false;
	}
	return true;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone = page_zone(page);
	return __count_immobile_pages(zone, page, 0);
}

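/*
 * set_migratetype_isolate - mark a pageblock MIGRATE_ISOLATE so the page
 * allocator will no longer hand out pages from it.  Returns 0 on success,
 * -EBUSY if the block contains pages that cannot be isolated.
 */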
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;
	int zone_idx;

	zone = page_zone(page);
	zone_idx = zone_idx(zone);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * The memory isolation notifier chain lets drivers (such as memory
	 * balloon drivers) report how many pages in this range they hold;
	 * those pages are expected to be released when the range goes away,
	 * so they do not prevent isolation.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * Isolation succeeds if the number of pages that are neither free
	 * nor on the LRU stays within the count reported by the notifier.
	 */
	if (__count_immobile_pages(zone, page, arg.pages_found))
		ret = 0;

out:
	if (!ret) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE

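/*
 * Remove every isolated free page in [start_pfn, end_pfn) from the buddy
 * free lists and mark it PageReserved.  All pages in the range must already
 * be isolated before calling this.
 */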
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      - (1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
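/*
 * Return true if @page currently lies within a free buddy block of some
 * order on its zone's free lists.
 */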
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif

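/* Symbolic names for the page flags decoded by dump_page_flags() */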
static struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
	{-1UL,				NULL		},
};

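/* Print the names of the flags set in @flags, plus any unrecognised bits */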
static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* keep only the flag bits; strip the zone/node/section id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; pageflag_names[i].name && flags; i++) {

		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* report any flags that were left over */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}

void dump_page(struct page *page)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
		page, atomic_read(&page->_count), page_mapcount(page),
		page->mapping, page->index);
	dump_page_flags(page->flags);
}