/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kswapd also exists in this module.
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wilhelms, Aug 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ian Jackson's ext2-patches)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);        /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended.  To avoid races with the
 * suspend/hibernate code, they should always be called with pm_mutex held
 * (gfp_allowed_mask also should only be modified with pm_mutex held, unless
 * the suspend/hibernate code is guaranteed not to run in parallel with that
 * modification).
 */
static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * Per-zone ratios used to size each zone's lowmem_reserve[] protection
 * against allocations that could equally well have been satisfied from a
 * higher zone.  Tunable via /proc/sys/vm/lowmem_reserve_ratio.
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
        256,
#endif
#ifdef CONFIG_ZONE_DMA32
        256,
#endif
#ifdef CONFIG_HIGHMEM
        32,
#endif
        32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
        "DMA32",
#endif
        "Normal",
#ifdef CONFIG_HIGHMEM
        "HighMem",
#endif
        "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges
   * of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible, so
   * the number of times add_active_range() can be called is related to
   * the number of nodes and the number of holes.
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{

        if (unlikely(page_group_by_mobility_disabled))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}
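
/*
 * For illustration: the migratetype occupies the PB_migrate..PB_migrate_end
 * bits of the per-pageblock flags bitmap, so marking a block is simply e.g.
 * set_pageblock_migratetype(page, MIGRATE_MOVABLE); later lookups with
 * get_pageblock_migratetype() read the same bits back.
 */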

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                reset_page_mapcount(page); /* remove PageBuddy */
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                                "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        dump_page(page);

        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        reset_page_mapcount(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured
 * thusly: the first PAGE_SIZE page is called the "head page", the remaining
 * pages are "tail pages" whose ->first_page points back at the head.  The
 * head page carries the compound order and a destructor invoked when the
 * page is finally freed.
 */
static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                __SetPageTail(p);
                p->first_page = page;
        }
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        int bad = 0;

        if (unlikely(compound_order(page) != order) ||
            unlikely(!PageHead(page))) {
                bad_page(page);
                bad++;
        }

        __ClearPageHead(page);

        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) || (p->first_page != page))) {
                        bad_page(page);
                        bad++;
                }
                __ClearPageTail(p);
        }

        return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our pair (buddy1)
 * and the combined order O+1 parent page they form.
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER.
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1 << order);
}
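
/*
 * For illustration: an order-O buddy pair differs only in bit O of the page
 * index, so the buddy of index 8 at order 1 is 8 ^ (1 << 1) = 10, and the
 * combined order-2 parent of the pair starts at 8 & ~(1 << 1) = 8 (equally,
 * buddy_idx & page_idx, as used by __free_one_page() below).
 */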

/*
 * This function checks whether a page is free && is the buddy
 * we can do coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy;
 * setting, clearing and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;

        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;

        VM_BUG_ON(migratetype == -1);

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));

        while (order < MAX_ORDER-1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;

                /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. In case,
         * that is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = buddy_idx & page_idx;
                higher_page = page + (combined_idx - page_idx);
                buddy_idx = __find_buddy_index(combined_idx, order + 1);
                higher_buddy = page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}
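
/*
 * Worked example: freeing an order-0 page at index 10 whose buddy 11 is free
 * merges them into an order-1 block at index 10; if the order-1 buddy at
 * index 8 is also free, the loop continues and produces an order-2 block at
 * index 8, and so on until a buddy is busy or MAX_ORDER-1 is reached.
 */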

/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * Page should not be on the LRU, so there is nothing to fix up there;
 * free_pages_check() will verify the rest.
 */
static inline void free_page_mlock(struct page *page)
{
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL) |
                (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int to_free = count;

        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        while (to_free) {
                struct page *page;
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around
                 * empty lists.
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = to_free;

                do {
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
                } while (--to_free && --batch_free && !list_empty(list));
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        __free_one_page(page, zone, order, migratetype);
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
        int i;
        int bad = 0;

        trace_mm_page_free_direct(page, order);
        kmemcheck_free_shadow(page, order);

        if (PageAnon(page))
                page->mapping = NULL;
        for (i = 0; i < (1 << order); i++)
                bad += free_pages_check(page + i);
        if (bad)
                return false;

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, order))
                return;

        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
                                        get_pageblock_migratetype(page));
        local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
        if (order == 0) {
                __ClearPageReserved(page);
                set_page_count(page, 0);
                set_page_refcounted(page);
                __free_page(page);
        } else {
                int loop;

                prefetchw(page);
                for (loop = 0; loop < BITS_PER_LONG; loop++) {
                        struct page *p = &page[loop];

                        if (loop + 1 < BITS_PER_LONG)
                                prefetchw(p + 1);
                        __ClearPageReserved(p);
                        set_page_count(p, 0);
                }

                set_page_refcounted(page);
                __free_pages(page, order);
        }
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without someone knowing the effect.
 *
 * The order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}
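
/*
 * For illustration: expand(zone, page, 0, 3, ...) carves an order-3 block
 * into free order-2, order-1 and order-0 upper halves (returned to the free
 * lists) and leaves the first page behind as the order-0 allocation.
 */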

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL) |
                (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area *area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }

        return NULL;
}

/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE },
};
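
/*
 * For illustration: an unmovable request whose own free list is empty first
 * raids MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, and only then the
 * MIGRATE_RESERVE blocks, per the row order above.
 */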

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
{
        struct page *page;
        unsigned long order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (start_pfn < zone->zone_start_pfn)
                start_page = page;
        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
                                        int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
        struct free_area *area;
        int current_order;
        struct page *page;
        int migratetype, i;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
                        migratetype = fallbacks[start_migratetype][i];

                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
                                continue;

                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
                                continue;

                        page = list_entry(area->free_list[migratetype].next,
                                        struct page, lru);
                        area->nr_free--;

                        /*
                         * If breaking a large block of pages, move all free
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
                                        start_migratetype == MIGRATE_RECLAIMABLE ||
                                        page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);

                                /* Claim the whole block if over half of it is free */
                                if (pages >= (1 << (pageblock_order-1)) ||
                                                page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);

                                migratetype = start_migratetype;
                        }

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);

                        /* Take ownership for orders >= pageblock_order */
                        if (current_order >= pageblock_order)
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);

                        expand(zone, page, order, current_order, area, migratetype);

                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);

                        return page;
                }
        }

        return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);

        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);

                /*
                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
                 * is used because __rmqueue_smallest is an inline function
                 * and we want just one call site
                 */
                if (!page) {
                        migratetype = MIGRATE_RESERVE;
                        goto retry_reserve;
                }
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
{
        int i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the
                 * caller's perspective, the linked list is ordered by page
                 * number in some conditions. This is useful for IO devices
                 * that can merge IO requests if the physical pages are
                 * ordered properly.
                 */
                if (likely(cold == 0))
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
}
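
/*
 * For illustration: refilling a per-cpu list with rmqueue_bulk(zone, 0,
 * pcp->batch, &pcp->lists[type], type, 0) grabs up to pcp->batch order-0
 * pages under one zone->lock hold; the return value (pages actually
 * obtained) is what buffered_rmqueue() adds to pcp->count.
 */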

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
        unsigned long flags;
        struct zone *zone;

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;

                local_irq_save(flags);
                pset = per_cpu_ptr(zone->pageset, cpu);

                pcp = &pset->pcp;
                if (pcp->count) {
                        free_pcppages_bulk(zone, pcp->count, pcp);
                        pcp->count = 0;
                }
                local_irq_restore(flags);
        }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
        drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
        on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        int order, t;
        struct list_head *curr;

        if (!zone->spanned_pages)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }

        for_each_migratetype_order(order, t) {
                list_for_each(curr, &zone->free_area[order].free_list[t]) {
                        unsigned long i;

                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, 0))
                return;

        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);

        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
         * Free ISOLATE pages back to the allocator because they are being
         * offlined but treat RESERVE as movable pages so we can get those
         * areas back if necessary. Otherwise, we may have to free
         * excessively into the page allocator
         */
        if (migratetype >= MIGRATE_PCPTYPES) {
                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
                        free_one_page(zone, page, 0, migratetype);
                        goto out;
                }
                migratetype = MIGRATE_MOVABLE;
        }

        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (cold)
                list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
                list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }

out:
        local_irq_restore(flags);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
        int i;

        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
        /*
         * Split shadow pages too, because free(page[0]) would
         * otherwise free the whole shadow.
         */
        if (kmemcheck_page_is_tracked(page))
                split_page(virt_to_page(page[0].shadow), order);
#endif

        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
}

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
        unsigned int order;
        unsigned long watermark;
        struct zone *zone;

        BUG_ON(!PageBuddy(page));

        zone = page_zone(page);
        order = page_order(page);

        /* Obey watermarks as if the page was being allocated */
        watermark = low_wmark_pages(zone) + (1 << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return 0;

        /* Remove page from free list */
        list_del(&page->lru);
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

        /* Split into individual pages */
        set_page_refcounted(page);
        split_page(page, order);

        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
                for (; page < endpage; page += pageblock_nr_pages)
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
        }

        return 1 << order;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
                        struct zone *zone, int order, gfp_t gfp_flags,
                        int migratetype)
{
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);

again:
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
                struct list_head *list;

                local_irq_save(flags);
                pcp = &this_cpu_ptr(zone->pageset)->pcp;
                list = &pcp->lists[migratetype];
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, list,
                                        migratetype, cold);
                        if (unlikely(list_empty(list)))
                                goto failed;
                }

                if (cold)
                        page = list_entry(list->prev, struct page, lru);
                else
                        page = list_entry(list->next, struct page, lru);

                list_del(&page->lru);
                pcp->count--;
        } else {
                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
                        /*
                         * __GFP_NOFAIL is not to be used in new code.
                         *
                         * All __GFP_NOFAIL callers should be fixed so that
                         * they properly detect and handle allocation
                         * failures.
                         *
                         * We most definitely don't want callers attempting
                         * to allocate greater than order-1 page units with
                         * __GFP_NOFAIL.
                         */
                        WARN_ON_ONCE(order > 1);
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
        }

        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);

        VM_BUG_ON(bad_range(zone, page));
        if (prep_new_page(page, order, gfp_flags))
                goto again;
        return page;

failed:
        local_irq_restore(flags);
        return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
        struct fault_attr attr;

        u32 ignore_gfp_highmem;
        u32 ignore_gfp_wait;
        u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

        struct dentry *ignore_gfp_highmem_file;
        struct dentry *ignore_gfp_wait_file;
        struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
        .ignore_gfp_wait = 1,
        .ignore_gfp_highmem = 1,
        .min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
        return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        if (order < fail_page_alloc.min_order)
                return 0;
        if (gfp_mask & __GFP_NOFAIL)
                return 0;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return 0;
        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
                return 0;

        return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;
        int err;

        err = init_fault_attr_dentries(&fail_page_alloc.attr,
                                       "fail_page_alloc");
        if (err)
                return err;
        dir = fail_page_alloc.attr.dentries.dir;

        fail_page_alloc.ignore_gfp_wait_file =
                debugfs_create_bool("ignore-gfp-wait", mode, dir,
                                      &fail_page_alloc.ignore_gfp_wait);

        fail_page_alloc.ignore_gfp_highmem_file =
                debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                      &fail_page_alloc.ignore_gfp_highmem);
        fail_page_alloc.min_order_file =
                debugfs_create_u32("min-order", mode, dir,
                                   &fail_page_alloc.min_order);

        if (!fail_page_alloc.ignore_gfp_wait_file ||
            !fail_page_alloc.ignore_gfp_highmem_file ||
            !fail_page_alloc.min_order_file) {
                err = -ENOMEM;
                debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
                debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
                debugfs_remove(fail_page_alloc.min_order_file);
                cleanup_fault_attr_dentries(&fail_page_alloc.attr);
        }

        return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return true if free pages are above 'mark'. This takes into account the
 * order of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags, long free_pages)
{
        /* free_pages may go negative - that's OK */
        long min = mark;
        int o;

        free_pages -= (1 << order) - 1;
        if (alloc_flags & ALLOC_HIGH)
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
                min -= min / 4;

        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
                return false;
        for (o = 0; o < order; o++) {
                /* At the next order, this order's pages become unavailable */
                free_pages -= z->free_area[o].nr_free << o;

                /* Require fewer higher order pages to be free */
                min >>= 1;

                if (free_pages <= min)
                        return false;
        }
        return true;
}
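
/*
 * Numeric sketch: with mark = 1024 pages, an order-2 request and
 * ALLOC_HARDER, min becomes 768; the first check needs free_pages (less the
 * 3 extra pages of the request beyond the first, and less lowmem_reserve)
 * to stay above 768, and each lower order is then re-checked with min
 * halved at every step.
 */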

bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                        zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        long free_pages = zone_page_state(z, NR_FREE_PAGES);

        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                                                free_pages);
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        nodemask_t *allowednodes;       /* zonelist_cache approximation */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return NULL;

        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }

        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
                                        &cpuset_current_mems_allowed :
                                        &node_states[N_HIGH_MEMORY];
        return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones BITMAP).
 *  2) Check that the zone's node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                                nodemask_t *allowednodes)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */
        int n;                          /* node that zone *z is on */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return 1;

        i = z - zonelist->_zonerefs;
        n = zlc->z_to_n[i];

        /* This zone is worth trying if it is allowed but not full */
        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return;

        i = z - zonelist->_zonerefs;

        set_bit(i, zlc->fullzones);
}

#else /* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
        return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                nodemask_t *allowednodes)
{
        return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif /* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
                struct zone *preferred_zone, int migratetype)
{
        struct zoneref *z;
        struct page *page = NULL;
        int classzone_idx;
        struct zone *zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */

        classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                high_zoneidx, nodemask) {
                if (NUMA_BUILD && zlc_active &&
                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;
                if ((alloc_flags & ALLOC_CPUSET) &&
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;

                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
                        int ret;

                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                        if (zone_watermark_ok(zone, order, mark,
                                    classzone_idx, alloc_flags))
                                goto try_this_zone;

                        if (zone_reclaim_mode == 0)
                                goto this_zone_full;

                        ret = zone_reclaim(zone, gfp_mask, order);
                        switch (ret) {
                        case ZONE_RECLAIM_NOSCAN:
                                /* did not scan */
                                goto try_next_zone;
                        case ZONE_RECLAIM_FULL:
                                /* scanned but unreclaimable */
                                goto this_zone_full;
                        default:
                                /* did we reclaim enough */
                                if (!zone_watermark_ok(zone, order, mark,
                                                classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }

try_this_zone:
                page = buffered_rmqueue(preferred_zone, zone, order,
                                                gfp_mask, migratetype);
                if (page)
                        break;
this_zone_full:
                if (NUMA_BUILD)
                        zlc_mark_zone_full(zonelist, z);
try_next_zone:
                if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
                        /*
                         * we do zlc_setup after the first zone is tried but
                         * only if there are multiple nodes make it worthwhile
                         */
                        allowednodes = zlc_setup(zonelist, alloc_flags);
                        zlc_active = 1;
                        did_zlc_setup = 1;
                }
        }

        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
                /* Disable zlc cache for second zonelist scan */
                zlc_active = 0;
                goto zonelist_scan;
        }
        return page;
}

/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
        bool ret = false;

#if NODES_SHIFT > 8
        ret = in_interrupt();
#endif
        return ret;
}

static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
                                unsigned long pages_reclaimed)
{
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
                return 0;

        /*
         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
         * means __GFP_NOFAIL, but that may not be true in other
         * implementations.
         */
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
                return 1;

        /*
         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
         * specified, then we retry until we no longer reclaim any pages
         * (above), or we've reclaimed an order of pages at least as
         * large as the allocation's order. In both cases, if the
         * allocation still fails, we stop retrying.
         */
        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
                return 1;

        /*
         * Don't let big-order allocations loop unless the caller
         * explicitly requests that.
         */
        if (gfp_mask & __GFP_NOFAIL)
                return 1;

        return 0;
}

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
        int migratetype)
{
        struct page *page;

        /* Acquire the OOM killer lock for the zones in zonelist */
        if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
                schedule_timeout_uninterruptible(1);
                return NULL;
        }

        /*
         * Go through the zonelist yet one more time, keep very high watermark
         * here, this is only to catch a parallel oom killing, we must fail if
         * we're still under heavy pressure.
         */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
                order, zonelist, high_zoneidx,
                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
                preferred_zone, migratetype);
        if (page)
                goto out;

        if (!(gfp_mask & __GFP_NOFAIL)) {
                /* The OOM killer will not help higher order allocs */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                        goto out;
                /* The OOM killer does not needlessly kill tasks for lowmem */
                if (high_zoneidx < ZONE_NORMAL)
                        goto out;
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
                 * The caller should handle page allocation failure by itself
                 * if it specifies __GFP_THISNODE.
                 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
                 */
                if (gfp_mask & __GFP_THISNODE)
                        goto out;
        }
        /* Exhausted what can be done so it's blamo time */
        out_of_memory(zonelist, gfp_mask, order, nodemask);

out:
        clear_zonelist_oom(zonelist, gfp_mask);
        return page;
}

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
        int migratetype, unsigned long *did_some_progress,
        bool sync_migration)
{
        struct page *page;

        if (!order || compaction_deferred(preferred_zone))
                return NULL;

        current->flags |= PF_MEMALLOC;
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
                                                nodemask, sync_migration);
        current->flags &= ~PF_MEMALLOC;
        if (*did_some_progress != COMPACT_SKIPPED) {

                /* Page migration frees to the PCP lists but we want merging */
                drain_pages(get_cpu());
                put_cpu();

                page = get_page_from_freelist(gfp_mask, nodemask,
                                order, zonelist, high_zoneidx,
                                alloc_flags, preferred_zone,
                                migratetype);
                if (page) {
                        preferred_zone->compact_considered = 0;
                        preferred_zone->compact_defer_shift = 0;
                        count_vm_event(COMPACTSUCCESS);
                        return page;
                }

                /*
                 * It's bad if compaction run occurs and fails.
                 * The most likely reason is that pages exist,
                 * but not enough to satisfy watermarks.
                 */
                count_vm_event(COMPACTFAIL);
                defer_compaction(preferred_zone);

                cond_resched();
        }

        return NULL;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
        int migratetype, unsigned long *did_some_progress,
        bool sync_migration)
{
        return NULL;
}
#endif /* CONFIG_COMPACTION */

/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
        int migratetype, unsigned long *did_some_progress)
{
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        bool drained = false;

        cond_resched();

        /* We now go into synchronous reclaim */
        cpuset_memory_pressure_bump();
        current->flags |= PF_MEMALLOC;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        current->reclaim_state = &reclaim_state;

        *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

        current->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
        current->flags &= ~PF_MEMALLOC;

        cond_resched();

        if (unlikely(!(*did_some_progress)))
                return NULL;

retry:
        page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);

        /*
         * If an allocation failed after direct reclaim, it could be because
         * pages are pinned on the per-cpu lists. Drain them and try again
         */
        if (!page && !drained) {
                drain_all_pages();
                drained = true;
                goto retry;
        }

        return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
        int migratetype)
{
        struct page *page;

        do {
                page = get_page_from_freelist(gfp_mask, nodemask, order,
                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
                        preferred_zone, migratetype);

                if (!page && gfp_mask & __GFP_NOFAIL)
                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
        } while (!page && (gfp_mask & __GFP_NOFAIL));

        return page;
}

static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
                                                enum zone_type high_zoneidx,
                                                enum zone_type classzone_idx)
{
        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
                wakeup_kswapd(zone, order, classzone_idx);
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
        const gfp_t wait = gfp_mask & __GFP_WAIT;

        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

        /*
         * The caller may dip into page reserves a bit more if the caller
         * cannot run direct reclaim, or if the caller has realtime scheduling
         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
         */
        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

        if (!wait) {
                /*
                 * Not worth trying to allocate harder for
                 * __GFP_NOMEMALLOC even if it can't schedule.
                 */
                if (!(gfp_mask & __GFP_NOMEMALLOC))
                        alloc_flags |= ALLOC_HARDER;
                /*
                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                 */
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (unlikely(rt_task(current)) && !in_interrupt())
                alloc_flags |= ALLOC_HARDER;

        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
                if (!in_interrupt() &&
                    ((current->flags & PF_MEMALLOC) ||
                     unlikely(test_thread_flag(TIF_MEMDIE))))
                        alloc_flags |= ALLOC_NO_WATERMARKS;
        }

        return alloc_flags;
}
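
/*
 * For illustration: a GFP_ATOMIC caller (no __GFP_WAIT, __GFP_HIGH set)
 * ends up with ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER and no
 * ALLOC_CPUSET, letting it dip well below the min watermark rather than
 * fail in a context that cannot reclaim.
 */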

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
        int migratetype)
{
        const gfp_t wait = gfp_mask & __GFP_WAIT;
        struct page *page = NULL;
        int alloc_flags;
        unsigned long pages_reclaimed = 0;
        unsigned long did_some_progress;
        bool sync_migration = false;

        /*
         * In the slowpath, we sanity check order to avoid ever trying to
         * reclaim >= MAX_ORDER areas which will never succeed. Callers may
         * be using allocators in order of preference for an area that is
         * too large.
         */
        if (order >= MAX_ORDER) {
                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                return NULL;
        }

        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
         * __GFP_NOWARN set) should not cause reclaim since the subsystem
         * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
         * using a larger set of nodes after it has established that the
         * allowed per node queues are empty and that nodes are
         * over allocated.
         */
        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;

restart:
        if (!(gfp_mask & __GFP_NO_KSWAPD))
                wake_all_kswapd(order, zonelist, high_zoneidx,
                                                zone_idx(preferred_zone));

        /*
         * OK, we're below the kswapd watermark and have kicked background
         * reclaim. Now things get more complex, so set up alloc_flags
         * according to how we want to proceed.
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);

        /*
         * Find the true preferred zone if the allocation is unconstrained by
         * cpusets.
         */
        if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
                first_zones_zonelist(zonelist, high_zoneidx, NULL,
                                        &preferred_zone);

        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
                        preferred_zone, migratetype);
        if (page)
                goto got_pg;

rebalance:
        /* Allocate without watermarks if the context allows */
        if (alloc_flags & ALLOC_NO_WATERMARKS) {
                page = __alloc_pages_high_priority(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
                if (page)
                        goto got_pg;
        }

        /* Atomic allocations - we can't balance anything */
        if (!wait)
                goto nopage;

        /* Avoid recursion of direct reclaim */
        if (current->flags & PF_MEMALLOC)
                goto nopage;

        /* Avoid allocations with no watermarks from looping endlessly */
        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
                goto nopage;

        /*
         * Try direct compaction. The first pass is asynchronous. Subsequent
         * attempts after direct reclaim are synchronous
         */
        page = __alloc_pages_direct_compact(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
                                        migratetype, &did_some_progress,
                                        sync_migration);
        if (page)
                goto got_pg;
        sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);

        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
                                        migratetype, &did_some_progress);
        if (page)
                goto got_pg;

        /*
         * If we failed to make any progress reclaiming, then we are
         * running out of options and have to consider going OOM
         */
        if (!did_some_progress) {
                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
                        if (oom_killer_disabled)
                                goto nopage;
                        page = __alloc_pages_may_oom(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask, preferred_zone,
                                        migratetype);
                        if (page)
                                goto got_pg;

                        if (!(gfp_mask & __GFP_NOFAIL)) {
                                /*
                                 * The oom killer is not called for high-order
                                 * allocations that may fail, so if no progress
                                 * is being made, there are no other options
                                 * and retrying is unlikely to help.
                                 */
                                if (order > PAGE_ALLOC_COSTLY_ORDER)
                                        goto nopage;
                                /*
                                 * The oom killer is not called for lowmem
                                 * allocations to prevent needlessly killing
                                 * innocent tasks.
                                 */
                                if (high_zoneidx < ZONE_NORMAL)
                                        goto nopage;
                        }

                        goto restart;
                }
        }

        /* Check if we should retry the allocation */
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        } else {
                /*
                 * High-order allocations do not necessarily loop after
                 * direct reclaim and reclaim/compaction depends on compaction
                 * being called after reclaim so call directly if necessary
                 */
                page = __alloc_pages_direct_compact(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
                                        migratetype, &did_some_progress,
                                        sync_migration);
                if (page)
                        goto got_pg;
        }

nopage:
        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
                unsigned int filter = SHOW_MEM_FILTER_NODES;

                /*
                 * This documents exceptions given to allocations in certain
                 * contexts that are allowed to allocate outside current's set
                 * of allowed nodes.
                 */
                if (!(gfp_mask & __GFP_NOMEMALLOC))
                        if (test_thread_flag(TIF_MEMDIE) ||
                            (current->flags & (PF_MEMALLOC | PF_EXITING)))
                                filter &= ~SHOW_MEM_FILTER_NODES;
                if (in_interrupt() || !wait)
                        filter &= ~SHOW_MEM_FILTER_NODES;

                pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
                        current->comm, order, gfp_mask);
                dump_stack();
                if (!should_suppress_show_mem())
                        show_mem(filter);
        }
        return page;
got_pg:
        if (kmemcheck_enabled)
                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
        return page;

}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zone *preferred_zone;
        struct page *page;
        int migratetype = allocflags_to_migratetype(gfp_mask);

        gfp_mask &= gfp_allowed_mask;

        lockdep_trace_alloc(gfp_mask);

        might_sleep_if(gfp_mask & __GFP_WAIT);

        if (should_fail_alloc_page(gfp_mask, order))
                return NULL;

        /*
         * Check the zones suitable for the gfp_mask contain at least one
         * valid zone. It's possible to have an empty zonelist as a result
         * of GFP_THISNODE and a memoryless node
         */
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;

        get_mems_allowed();
        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx,
                                nodemask ? : &cpuset_current_mems_allowed,
                                &preferred_zone);
        if (!preferred_zone) {
                put_mems_allowed();
                return NULL;
        }

        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
                        preferred_zone, migratetype);
        if (unlikely(!page))
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
        put_mems_allowed();

        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *page;

        /*
         * __get_free_pages() returns a 32-bit address, which cannot represent
         * a highmem page
         */
        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
        int i = pagevec_count(pvec);

        while (--i >= 0) {
                trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
                free_hot_cold_page(pvec->pages[i], pvec->cold);
        }
}

void __free_pages(struct page *page, unsigned int order)
{
        if (put_page_testzero(page)) {
                if (order == 0)
                        free_hot_cold_page(page, 0);
                else
                        __free_pages_ok(page, order);
        }
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
        if (addr != 0) {
                VM_BUG_ON(!virt_addr_valid((void *)addr));
                __free_pages(virt_to_page((void *)addr), order);
        }
}

EXPORT_SYMBOL(free_pages);

static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
{
        if (addr) {
                unsigned long alloc_end = addr + (PAGE_SIZE << order);
                unsigned long used = addr + PAGE_ALIGN(size);

                split_page(virt_to_page((void *)addr), order);
                while (used < alloc_end) {
                        free_page(used);
                        used += PAGE_SIZE;
                }
        }
        return (void *)addr;
}
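
/*
 * For illustration: alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL) grabs an
 * order-3 block (8 pages), splits it with split_page(), and the loop above
 * hands pages 5..7 straight back to the allocator, leaving exactly 5
 * contiguous pages for the caller.
 */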

/**
 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
        unsigned int order = get_order(size);
        unsigned long addr;

        addr = __get_free_pages(gfp_mask, order);
        return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *                         pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before
 * falling back.
 * Note this is not alloc_pages_exact_node(), which allocates on a specific
 * node, but is not exact.
 */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
        unsigned order = get_order(size);
        struct page *p = alloc_pages_node(nid, gfp_mask, order);
        if (!p)
                return NULL;
        return make_alloc_exact((unsigned long)page_address(p), order, size);
}
EXPORT_SYMBOL(alloc_pages_exact_nid);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
        unsigned long addr = (unsigned long)virt;
        unsigned long end = addr + PAGE_ALIGN(size);

        while (addr < end) {
                free_page(addr);
                addr += PAGE_SIZE;
        }
}
EXPORT_SYMBOL(free_pages_exact);

static unsigned int nr_free_zone_pages(int offset)
{
        struct zoneref *z;
        struct zone *zone;

        /* Just pick one node, since fallback list is circular */
        unsigned int sum = 0;

        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

        for_each_zone_zonelist(zone, z, zonelist, offset) {
                unsigned long size = zone->present_pages;
                unsigned long high = high_wmark_pages(zone);
                if (size > high)
                        sum += size - high;
        }

        return sum;
}
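
/*
 * For illustration: on a zone with 262144 present pages and a high watermark
 * of 4096, nr_free_zone_pages() counts 258048 of them toward the total, i.e.
 * it reports how much each zone could hold above its high watermark.
 */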
2418
2419
2420
2421
2422unsigned int nr_free_buffer_pages(void)
2423{
2424 return nr_free_zone_pages(gfp_zone(GFP_USER));
2425}
2426EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2427
2428
2429
2430
2431unsigned int nr_free_pagecache_pages(void)
2432{
2433 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2434}
2435
2436static inline void show_node(struct zone *zone)
2437{
2438 if (NUMA_BUILD)
2439 printk("Node %d ", zone_to_nid(zone));
2440}
2441
2442void si_meminfo(struct sysinfo *val)
2443{
2444 val->totalram = totalram_pages;
2445 val->sharedram = 0;
2446 val->freeram = global_page_state(NR_FREE_PAGES);
2447 val->bufferram = nr_blockdev_pages();
2448 val->totalhigh = totalhigh_pages;
2449 val->freehigh = nr_free_highpages();
2450 val->mem_unit = PAGE_SIZE;
2451}
2452
2453EXPORT_SYMBOL(si_meminfo);
2454
2455#ifdef CONFIG_NUMA
2456void si_meminfo_node(struct sysinfo *val, int nid)
2457{
2458 pg_data_t *pgdat = NODE_DATA(nid);
2459
2460 val->totalram = pgdat->node_present_pages;
2461 val->freeram = node_page_state(nid, NR_FREE_PAGES);
2462#ifdef CONFIG_HIGHMEM
2463 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2464 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2465 NR_FREE_PAGES);
2466#else
2467 val->totalhigh = 0;
2468 val->freehigh = 0;
2469#endif
2470 val->mem_unit = PAGE_SIZE;
2471}
2472#endif
2473
2474
2475
2476
2477
2478static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
2479{
2480 bool ret = false;
2481
2482 if (!(flags & SHOW_MEM_FILTER_NODES))
2483 goto out;
2484
2485 get_mems_allowed();
2486 ret = !node_isset(zone->zone_pgdat->node_id,
2487 cpuset_current_mems_allowed);
2488 put_mems_allowed();
2489out:
2490 return ret;
2491}
2492
2493#define K(x) ((x) << (PAGE_SHIFT-10))
2494
2495
2496
2497
2498
2499
2500
2501
2502void __show_free_areas(unsigned int filter)
2503{
2504 int cpu;
2505 struct zone *zone;
2506
2507 for_each_populated_zone(zone) {
2508 if (skip_free_areas_zone(filter, zone))
2509 continue;
2510 show_node(zone);
2511 printk("%s per-cpu:\n", zone->name);
2512
		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = per_cpu_ptr(zone->pageset, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu"
		" dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
		global_page_state(NR_ACTIVE_ANON),
		global_page_state(NR_INACTIVE_ANON),
		global_page_state(NR_ISOLATED_ANON),
		global_page_state(NR_ACTIVE_FILE),
		global_page_state(NR_INACTIVE_FILE),
		global_page_state(NR_ISOLATED_FILE),
		global_page_state(NR_UNEVICTABLE),
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE),
		global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_populated_zone(zone) {
		int i;

		if (skip_free_areas_zone(filter, zone))
			continue;
		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" present:%lukB"
			" mlocked:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" mapped:%lukB"
			" shmem:%lukB"
			" slab_reclaimable:%lukB"
			" slab_unreclaimable:%lukB"
			" kernel_stack:%lukB"
			" pagetables:%lukB"
			" unstable:%lukB"
			" bounce:%lukB"
			" writeback_tmp:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone_page_state(zone, NR_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ISOLATED_ANON)),
			K(zone_page_state(zone, NR_ISOLATED_FILE)),
			K(zone->present_pages),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_FILE_DIRTY)),
			K(zone_page_state(zone, NR_WRITEBACK)),
			K(zone_page_state(zone, NR_FILE_MAPPED)),
			K(zone_page_state(zone, NR_SHMEM)),
			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
			zone_page_state(zone, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
			K(zone_page_state(zone, NR_PAGETABLE)),
			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_populated_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		if (skip_free_areas_zone(filter, zone))
			continue;
		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}
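	/*
	 * Illustration (not from the source): the loop above prints one
	 * line per zone of the form
	 *   Normal: 12*4kB 7*8kB 3*16kB 1*32kB 0*64kB ... = 184kB
	 * i.e. the number of free blocks of each order times the block
	 * size in kB, followed by the zone's free total.
	 */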

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

void show_free_areas(void)
{
	__show_free_areas(0);
}

static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone,
				&zonelist->_zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}
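/*
 * Illustration (not from the source): for a node with DMA, Normal and
 * HighMem populated, the walk above starts at the highest zone index and
 * counts down, so the zonerefs are appended as HighMem, Normal, DMA --
 * allocations fall back from the preferred zone towards ZONE_DMA.
 */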

/*
 *  zonelist_order:
 *  0 = automatic detection of better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 *  the same zonelist. So only NUMA can configure this param.
 */
#define ZONELIST_ORDER_DEFAULT	0
#define ZONELIST_ORDER_NODE	1
#define ZONELIST_ORDER_ZONE	2

/* zonelist order in the kernel.
 * set_zonelist_order() will set this to NODE or ZONE.
 */
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};


#ifdef CONFIG_NUMA
/* The value user specified ....changed by config */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN	16
char numa_zonelist_order[NUMA_ZONELIST_ORDER_LEN] = "default";

/*
 * interface for configuring zonelist ordering.
 * command line option "numa_zonelist_order"
 *	= "[dD]efault	- default, automatic configuration.
 *	= "[nN]ode	- order by node locality, then by zone within node
 *	= "[zZ]one	- order by zone, then by locality within zone
 */
static int __parse_numa_zonelist_order(char *s)
{
	if (*s == 'd' || *s == 'D') {
		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
	} else if (*s == 'n' || *s == 'N') {
		user_zonelist_order = ZONELIST_ORDER_NODE;
	} else if (*s == 'z' || *s == 'Z') {
		user_zonelist_order = ZONELIST_ORDER_ZONE;
	} else {
		printk(KERN_WARNING
			"Ignoring invalid numa_zonelist_order value: "
			"%s\n", s);
		return -EINVAL;
	}
	return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
	int ret;

	if (!s)
		return 0;

	ret = __parse_numa_zonelist_order(s);
	if (ret == 0)
		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);

	return ret;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos)
{
	char saved_string[NUMA_ZONELIST_ORDER_LEN];
	int ret;
	static DEFINE_MUTEX(zl_order_mutex);

	mutex_lock(&zl_order_mutex);
	if (write)
		strcpy(saved_string, (char *)table->data);
	ret = proc_dostring(table, write, buffer, length, ppos);
	if (ret)
		goto out;
	if (write) {
		int oldval = user_zonelist_order;
		if (__parse_numa_zonelist_order((char *)table->data)) {
			/*
			 * bogus value.  restore saved string
			 */
			strncpy((char *)table->data, saved_string,
				NUMA_ZONELIST_ORDER_LEN);
			user_zonelist_order = oldval;
		} else if (oldval != user_zonelist_order) {
			mutex_lock(&zonelists_mutex);
			build_all_zonelists(NULL);
			mutex_unlock(&zonelists_mutex);
		}
	}
out:
	mutex_unlock(&zl_order_mutex);
	return ret;
}


#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.  It returns -1 if no node is found.
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;
	const struct cpumask *tmp = cpumask_of_node(0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_HIGH_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = cpumask_of_node(n);
		if (!cpumask_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
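/*
 * Worked example (not from the source): with two candidate nodes at
 * distances 20 and 40 from @node, neither with CPUs and with equal load,
 * the distance term dominates because it is scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES before the load is added, so the node at
 * distance 20 is picked first.
 */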

/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[1];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
	int pos, j, node;
	int zone_type;		/* needs to be signed */
	struct zone *z;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	pos = 0;
	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
		for (j = 0; j < nr_nodes; j++) {
			node = node_order[j];
			z = &NODE_DATA(node)->node_zones[zone_type];
			if (populated_zone(z)) {
				zoneref_set_zone(z,
					&zonelist->_zonerefs[pos++]);
				check_highest_zone(zone_type);
			}
		}
	}
	zonelist->_zonerefs[pos].zone = NULL;
	zonelist->_zonerefs[pos].zone_idx = 0;
}
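/*
 * Illustration (not from the source): with node_order = {0, 1} and both
 * nodes populating Normal and DMA, the zonelist built above reads
 * Normal(0), Normal(1), DMA(0), DMA(1) -- every Normal zone system-wide
 * is tried before any DMA zone is touched.
 */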

static int default_zonelist_order(void)
{
	int nid, zone_type;
	unsigned long low_kmem_size, total_size;
	struct zone *z;
	int average_size;
	/*
	 * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
	 * If they are really small and used heavily, the system can fall
	 * into OOM very easily.
	 * This function detects ZONE_DMA/DMA32 size and chooses the order.
	 */
	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
	low_kmem_size = 0;
	total_size = 0;
	for_each_online_node(nid) {
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			} else if (zone_type == ZONE_NORMAL) {
				/*
				 * If any node has only lowmem, then node order
				 * is preferred to allow kernel allocations
				 * locally; otherwise, they can easily infringe
				 * on other nodes when there is an abundance of
				 * lowmem available to allocate from.
				 */
				return ZONELIST_ORDER_NODE;
			}
		}
	}
	if (!low_kmem_size ||  /* there are no DMA area. */
	    low_kmem_size > total_size/2)	/* DMA/DMA32 is big. */
		return ZONELIST_ORDER_NODE;
	/*
	 * look into each node's config.
	 * If there is a node whose DMA/DMA32 memory is very big area on
	 * local memory, NODE_ORDER may be suitable.
	 */
	average_size = total_size /
				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
	for_each_online_node(nid) {
		low_kmem_size = 0;
		total_size = 0;
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			}
		}
		if (low_kmem_size &&
		    total_size > average_size && /* ignore small node */
		    low_kmem_size > total_size * 70/100)
			return ZONELIST_ORDER_NODE;
	}
	return ZONELIST_ORDER_ZONE;
}
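/*
 * Summary (not from the source): node order wins when any node lacks
 * ZONE_NORMAL, when DMA/DMA32 make up more than half of all memory, or
 * when a larger-than-average node is over 70% lowmem; otherwise zone
 * order is used so that scarce low memory is conserved system-wide.
 */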

static void set_zonelist_order(void)
{
	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
		current_zonelist_order = default_zonelist_order();
	else
		current_zonelist_order = user_zonelist_order;
}

static void build_zonelists(pg_data_t *pgdat)
{
	int j, node, load;
	enum zone_type i;
	nodemask_t used_mask;
	int local_node, prev_node;
	struct zonelist *zonelist;
	int order = current_zonelist_order;

	/* initialize zonelists */
	for (i = 0; i < MAX_ZONELISTS; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->_zonerefs[0].zone = NULL;
		zonelist->_zonerefs[0].zone_idx = 0;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = nr_online_nodes;
	prev_node = local_node;
	nodes_clear(used_mask);

	memset(node_order, 0, sizeof(node_order));
	j = 0;

	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (distance != node_distance(local_node, prev_node))
			node_load[node] = load;

		prev_node = node;
		load--;
		if (order == ZONELIST_ORDER_NODE)
			build_zonelists_in_node_order(pgdat, node);
		else
			node_order[j++] = node;	/* remember order */
	}

	if (order == ZONELIST_ORDER_ZONE) {
		/* calculate node order -- i.e., DMA last! */
		build_zonelists_in_zone_order(pgdat, j);
	}

	build_thisnode_zonelists(pgdat);
}

/* Construct the zonelist performance cache - see further mmzone.h */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	struct zonelist *zonelist;
	struct zonelist_cache *zlc;
	struct zoneref *z;

	zonelist = &pgdat->node_zonelists[0];
	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
	for (z = zonelist->_zonerefs; z->zone; z++)
		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
int local_memory_node(int node)
{
	struct zone *zone;

	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				   gfp_zone(GFP_KERNEL),
				   NULL,
				   &zone);
	return zone->node;
}
#endif

#else	/* CONFIG_NUMA */

static void set_zonelist_order(void)
{
	current_zonelist_order = ZONELIST_ORDER_ZONE;
}

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type j;
	struct zonelist *zonelist;

	local_node = pgdat->node_id;

	zonelist = &pgdat->node_zonelists[0];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}

	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	pgdat->node_zonelists[0].zlcache_ptr = NULL;
}

#endif	/* CONFIG_NUMA */

/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
static void setup_zone_pageset(struct zone *zone);

/*
 * Global mutex to protect against size modification of zonelists
 * as well as to serialize pageset setup for the new populated zone.
 */
DEFINE_MUTEX(zonelists_mutex);

/* return values int ....just for stop_machine() */
static __init_refok int __build_all_zonelists(void *data)
{
	int nid;
	int cpu;

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		build_zonelists(pgdat);
		build_zonelist_cache(pgdat);
	}

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are used also for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 */
	for_each_possible_cpu(cpu) {
		setup_pageset(&per_cpu(boot_pageset, cpu), 0);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus.  During
		 * boot, only the boot cpu should be on-line;  we'll init the
		 * secondary cpus' numa_mem as they come on-line.  During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
		if (cpu_online(cpu))
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	return 0;
}

/*
 * Called with zonelists_mutex held always
 * unless system_state == SYSTEM_BOOTING.
 */
void __ref build_all_zonelists(void *data)
{
	set_zonelist_order();

	if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		mminit_verify_zonelist();
		cpuset_init_current_mems_allowed();
	} else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
#ifdef CONFIG_MEMORY_HOTPLUG
		if (data)
			setup_zone_pageset((struct zone *)data);
#endif
		stop_machine(__build_all_zonelists, NULL, NULL);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * useless anyway because the page groups would be smaller than
	 * a single pageblock per migrate type.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	printk("Built %i zonelists in %s order, mobility grouping %s. "
		"Total pages: %ld\n",
			nr_online_nodes,
			zonelist_order_name[current_zonelist_order],
			page_group_by_mobility_disabled ? "off" : "on",
			vm_total_pages);
#ifdef CONFIG_NUMA
	printk("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the wait table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
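/*
 * Worked example (not from the source): a 1GB zone with 4KB pages has
 * 262144 pages; 262144 / PAGES_PER_WAITQUEUE = 1024, already a power of
 * two, so the zone gets a 1024-entry wait table (clamped to [4, 4096]).
 */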
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to
 * exactly determine later the size of the wait table needed, nor can we
 * know the maximum pfn the zone may eventually span. Instead of scaling
 * by the current zone size, just use the largest table allowed above,
 * 4096 entries.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
#endif

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
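/*
 * Worked example (not from the source): the table size is always a power
 * of two, so for size == 4096, ~size has only its low 12 bits set and
 * ffz(~size) == 12 -- i.e. log2 of the table size.
 */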

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
 * of blocks reserved is based on min_wmark_pages(zone). The memory within
 * the reserve will tend to store contiguous free pages. Setting
 * min_free_kbytes higher increases the number of pageblocks kept in
 * reserve (up to the cap below).
 */
static void setup_zone_migrate_reserve(struct zone *zone)
{
	unsigned long start_pfn, pfn, end_pfn;
	struct page *page;
	unsigned long block_migratetype;
	int reserve;

	/* Get the start pfn, end pfn and the number of blocks to reserve */
	start_pfn = zone->zone_start_pfn;
	end_pfn = start_pfn + zone->spanned_pages;
	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
							pageblock_order;

	/*
	 * Reserve blocks are generally in place to help high-order atomic
	 * allocations that are short-lived. A min_free_kbytes value that
	 * would result in more than 2 reserve blocks for atomic allocations
	 * is assumed to be in place to help anti-fragmentation for the
	 * future allocation of hugepages at runtime.
	 */
	reserve = min(2, reserve);

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Watch out for overlapping nodes */
		if (page_to_nid(page) != zone_to_nid(zone))
			continue;

		/* Blocks with reserved pages will never free, skip them. */
		if (PageReserved(page))
			continue;

		block_migratetype = get_pageblock_migratetype(page);

		/* If this block is reserved, account for it */
		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
			reserve--;
			continue;
		}

		/* Suitable for reserving if this block is movable */
		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
			set_pageblock_migratetype(page, MIGRATE_RESERVE);
			move_freepages_block(zone, page, MIGRATE_RESERVE);
			reserve--;
			continue;
		}

		/*
		 * If the reserve is met and this is a previous reserved block,
		 * take it back
		 */
		if (block_migratetype == MIGRATE_RESERVE) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			move_freepages_block(zone, page, MIGRATE_MOVABLE);
		}
	}
}
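/*
 * Worked example (not from the source): with min_wmark_pages(zone) == 1024
 * and pageblock_nr_pages == 1024, roundup gives one block, so a single
 * movable pageblock is converted to MIGRATE_RESERVE; the cap above never
 * lets more than two blocks be reserved this way.
 */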

/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;
	struct zone *z;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

	z = &NODE_DATA(nid)->node_zones[zone];
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s
		 * handed to this function.  They do not
		 * exist on hotplugged memory.
		 */
		if (context == MEMMAP_EARLY) {
			if (!early_pfn_valid(pfn))
				continue;
			if (!early_pfn_in_nid(pfn, nid))
				continue;
		}
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		mminit_verify_page_links(page, zone, nid, pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made. Later some blocks near
		 * the start are marked MIGRATE_RESERVE by
		 * setup_zone_migrate_reserve()
		 *
		 * bitmap is created for zone's valid pfn range. but memmap
		 * can be created for invalid pages (for alignment)
		 * check here not to call set_pageblock_migratetype() against
		 * pfn out of zone.
		 */
		if ((z->zone_start_pfn <= pfn)
		    && (pfn < z->zone_start_pfn + z->spanned_pages)
		    && !(pfn & (pageblock_nr_pages - 1)))
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);

		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
	int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.  But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}
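/*
 * Worked example (not from the source): a 1GB zone with 4KB pages has
 * 262144 present pages, so batch starts at 256, is capped at 128
 * (512KB / 4KB), divided by 4 to 32, and the final clamp computes
 * rounddown_pow_of_two(32 + 16) - 1 == 31.
 */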

static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;
	int migratetype;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp;
	pcp->count = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		INIT_LIST_HEAD(&pcp->lists[migratetype]);
}

/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp;
	pcp->high = high;
	pcp->batch = max(1UL, high/4);
	if ((high/4) > (PAGE_SHIFT * 8))
		pcp->batch = PAGE_SHIFT * 8;
}
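/*
 * Worked example (not from the source): with PAGE_SHIFT == 12, the batch
 * derived from a user-supplied high mark is high/4 but never more than
 * 96 pages; e.g. high == 1000 gives batch == 96, high == 200 gives 50.
 */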

static __meminit void setup_zone_pageset(struct zone *zone)
{
	int cpu;

	zone->pageset = alloc_percpu(struct per_cpu_pageset);

	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);

		setup_pageset(pcp, zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(pcp,
				(zone->present_pages /
					percpu_pagelist_fraction));
	}
}

/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);
}

static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		 wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (!slab_is_available()) {
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node_nopanic(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}

static int __zone_pcp_update(void *data)
{
	struct zone *zone = data;
	int cpu;
	unsigned long batch = zone_batchsize(zone), flags;

	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = per_cpu_ptr(zone->pageset, cpu);
		pcp = &pset->pcp;

		local_irq_save(flags);
		free_pcppages_bulk(zone, pcp->count, pcp);
		setup_pageset(pset, batch);
		local_irq_restore(flags);
	}
	return 0;
}

void zone_pcp_update(struct zone *zone)
{
	stop_machine(__zone_pcp_update, zone, NULL);
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->pageset = &boot_pageset;

	if (zone->present_pages)
		printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
			zone->name, zone->present_pages,
					 zone_batchsize(zone));
}

__meminit int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size,
					enum memmap_context context)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;
	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);

	return 0;
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns first region regardless of node
 */
static int __meminit first_active_region_index_in_nid(int nid)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;

	return -1;
}

/*
 * Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
static int __meminit next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative
 */
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long start_pfn = early_node_map[i].start_pfn;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (start_pfn <= pfn && pfn < end_pfn)
			return early_node_map[i].nid;
	}
	/* This is a memory hole */
	return -1;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0)
		return nid;
	/* just returns 0 */
	return 0;
}

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}
#endif

/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
	for (i = first_active_region_index_in_nid(nid); i != -1; \
				i = next_active_region_index_in_nid(i, nid))

/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn)
{
	int i;

	for_each_active_range_index_in_nid(i, nid) {
		unsigned long size_pages = 0;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (early_node_map[i].start_pfn >= max_low_pfn)
			continue;

		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn;

		size_pages = end_pfn - early_node_map[i].start_pfn;
		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
				PFN_PHYS(early_node_map[i].start_pfn),
				size_pages << PAGE_SHIFT);
	}
}

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Basic iterator support. Return the last range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns last region regardless of node
 */
static int __meminit last_active_region_index_in_nid(int nid)
{
	int i;

	for (i = nr_nodemap_entries - 1; i >= 0; i--)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;

	return -1;
}

/*
 * Basic iterator support. Return the previous active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
static int __meminit previous_active_region_index_in_nid(int index, int nid)
{
	for (index = index - 1; index >= 0; index--)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}

#define for_each_active_range_index_in_nid_reverse(i, nid) \
	for (i = last_active_region_index_in_nid(nid); i != -1; \
				i = previous_active_region_index_in_nid(i, nid))

u64 __init find_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	int i;

	/* Need to go over early_node_map to find out good range for node */
	for_each_active_range_index_in_nid_reverse(i, nid) {
		u64 addr;
		u64 ei_start, ei_last;
		u64 final_start, final_end;

		ei_last = early_node_map[i].end_pfn;
		ei_last <<= PAGE_SHIFT;
		ei_start = early_node_map[i].start_pfn;
		ei_start <<= PAGE_SHIFT;

		final_start = max(ei_start, goal);
		final_end = min(ei_last, limit);

		if (final_start >= final_end)
			continue;

		addr = memblock_find_in_range(final_start, final_end, size, align);

		if (addr == MEMBLOCK_ERROR)
			continue;

		return addr;
	}

	return MEMBLOCK_ERROR;
}
#endif

int __init add_from_early_node_map(struct range *range, int az,
				   int nr_range, int nid)
{
	int i;
	u64 start, end;

	/* need to go over early_node_map to find out good range for node */
	for_each_active_range_index_in_nid(i, nid) {
		start = early_node_map[i].start_pfn;
		end = early_node_map[i].end_pfn;
		nr_range = add_range(range, az, nr_range, start, end);
	}
	return nr_range;
}

void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
	int i;
	int ret;

	for_each_active_range_index_in_nid(i, nid) {
		ret = work_fn(early_node_map[i].start_pfn,
			      early_node_map[i].end_pfn, data);
		if (ret)
			break;
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	int i;

	for_each_active_range_index_in_nid(i, nid)
		memory_present(early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);
}

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, the start and end PFNs will be 0.
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	int i;
	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_active_range_index_in_nid(i, nid) {
		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonic increasing memory addresses
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the node and zone */
	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				&zone_start_pfn, &zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	int i = 0;
	unsigned long prev_end_pfn = 0, hole_pages = 0;
	unsigned long start_pfn;

	/* Find the end_pfn of the first active range of pfns in the node */
	i = first_active_region_index_in_nid(nid);
	if (i == -1)
		return 0;

	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);

	/* Account for ranges before physical memory on this node */
	if (early_node_map[i].start_pfn > range_start_pfn)
		hole_pages = prev_end_pfn - range_start_pfn;

	/* Find all holes for the zone within the node */
	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {

		/* No need to continue if prev_end_pfn is outside the zone */
		if (prev_end_pfn >= range_end_pfn)
			break;

		/* Make sure the end of the zone is not within the hole */
		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
		prev_end_pfn = max(prev_end_pfn, range_start_pfn);

		/* Update the hole size count and move on */
		if (start_pfn > range_start_pfn) {
			BUG_ON(prev_end_pfn > start_pfn);
			hole_pages += start_pfn - prev_end_pfn;
		}
		prev_end_pfn = early_node_map[i].end_pfn;
	}

	/* Account for ranges past physical memory on this node */
	if (range_end_pfn > prev_end_pfn)
		hole_pages += range_end_pfn -
				max(range_start_pfn, prev_end_pfn);

	return hole_pages;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
							node_start_pfn);
	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
							node_end_pfn);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

#else /* CONFIG_ARCH_POPULATES_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zonesize)
{
	unsigned long usemapsize;

	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
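/*
 * Worked example (not from the source, assuming NR_PAGEBLOCK_BITS == 4 and
 * pageblock_order == 10): a 1048576-page zone has 1024 pageblocks, needing
 * 4096 bits of flags; rounded up to whole longs that is 512 bytes.
 */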

static void __init setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zonesize);
	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
								   usemapsize);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Return a sensible default order for the pageblock size. */
static inline int pageblock_default_order(void)
{
	if (HPAGE_SHIFT > PAGE_SHIFT)
		return HUGETLB_PAGE_ORDER;

	return MAX_ORDER-1;
}

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
static inline void __init set_pageblock_order(unsigned int order)
{
	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * and pageblock_default_order() are unused as pageblock_order is set
 * at compile-time. See include/linux/pageblock-flags.h for the values of
 * pageblock_order based on the kernel config
 */
static inline int pageblock_default_order(unsigned int order)
{
	return MAX_ORDER-1;
}
#define set_pageblock_order(x)	do {} while (0)

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;
	pgdat_page_cgroup_init(pgdat);

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;
		enum lru_list l;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages =
			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			if (memmap_pages)
				printk(KERN_DEBUG
				       " %s zone: %lu pages used for memmap\n",
				       zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				" %s zone: %lu pages exceeds realsize %lu\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved pages */
		if (j == 0 && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;

		zone_pcp_init(zone);
		for_each_lru(l) {
			INIT_LIST_HEAD(&zone->lru[l].list);
			zone->reclaim_stat.nr_saved_scan[l] = 0;
		}
		zone->reclaim_stat.recent_rotated[0] = 0;
		zone->reclaim_stat.recent_rotated[1] = 0;
		zone->reclaim_stat.recent_scanned[0] = 0;
		zone->reclaim_stat.recent_scanned[1] = 0;
		zap_zone_vm_stats(zone);
		zone->flags = 0;
		if (!size)
			continue;

		set_pageblock_order(pageblock_default_order());
		setup_usemap(pgdat, zone, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		memmap_init(size, nid, j, zone_start_pfn);
		zone_start_pfn += size;
	}
}

static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node_nopanic(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long node_start_pfn, unsigned long *zholes_size)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
		nid, (unsigned long)pgdat,
		(unsigned long)pgdat->node_mem_map);
#endif

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
static void __init setup_nr_node_ids(void)
{
	unsigned int node;
	unsigned int highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}
#else
static inline void setup_nr_node_ids(void)
{
}
#endif

/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and may be
 * merged with existing ranges. free_area_init_nodes() later uses this
 * information to calculate zone sizes and holes.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	int i;

	mminit_dprintk(MMINIT_TRACE, "memory_register",
			"Entering add_active_range(%d, %#lx, %#lx) "
			"%d entries of %d used\n",
			nid, start_pfn, end_pfn,
			nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].start_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
							MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}

/**
 * remove_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @start_pfn: The new PFN of the range
 * @end_pfn: The new end PFN of the range
 *
 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
 * The map is kept near the end physical page range that has already been
 * registered. This function allows an arch to shrink an existing registered
 * range.
 */
void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
				unsigned long end_pfn)
{
	int i, j;
	int removed = 0;

	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
			  nid, start_pfn, end_pfn);

	/* Find the old active region end and shrink */
	for_each_active_range_index_in_nid(i, nid) {
		if (early_node_map[i].start_pfn >= start_pfn &&
		    early_node_map[i].end_pfn <= end_pfn) {
			/* clear it */
			early_node_map[i].start_pfn = 0;
			early_node_map[i].end_pfn = 0;
			removed = 1;
			continue;
		}
		if (early_node_map[i].start_pfn < start_pfn &&
		    early_node_map[i].end_pfn > start_pfn) {
			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
			early_node_map[i].end_pfn = start_pfn;
			if (temp_end_pfn > end_pfn)
				add_active_range(nid, end_pfn, temp_end_pfn);
			continue;
		}
		if (early_node_map[i].start_pfn >= start_pfn &&
		    early_node_map[i].end_pfn > end_pfn &&
		    early_node_map[i].start_pfn < end_pfn) {
			early_node_map[i].start_pfn = end_pfn;
			continue;
		}
	}

	if (!removed)
		return;

	/* remove the blank ones */
	for (i = nr_nodemap_entries - 1; i > 0; i--) {
		if (early_node_map[i].nid != nid)
			continue;
		if (early_node_map[i].end_pfn)
			continue;
		/* we found it, get rid of it */
		for (j = i; j < nr_nodemap_entries - 1; j++)
			memcpy(&early_node_map[j], &early_node_map[j+1],
				sizeof(early_node_map[j]));
		j = nr_nodemap_entries - 1;
		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
		nr_nodemap_entries--;
	}
}

/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
void __init remove_all_active_ranges(void)
{
	memset(early_node_map, 0, sizeof(early_node_map));
	nr_nodemap_entries = 0;
}

/* Compare two active node_active_regions */
static int __init cmp_node_active_region(const void *a, const void *b)
{
	struct node_active_region *arange = (struct node_active_region *)a;
	struct node_active_region *brange = (struct node_active_region *)b;

	/* Done this way to avoid overflows */
	if (arange->start_pfn > brange->start_pfn)
		return 1;
	if (arange->start_pfn < brange->start_pfn)
		return -1;

	return 0;
}

/* sort the node_map by start_pfn */
void __init sort_node_map(void)
{
	sort(early_node_map, (size_t)nr_nodemap_entries,
			sizeof(struct node_active_region),
			cmp_node_active_region, NULL);
}

/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
	int i;
	unsigned long min_pfn = ULONG_MAX;

	/* Regions in the early_node_map can be in any order */
	for_each_active_range_index_in_nid(i, nid)
		min_pfn = min(min_pfn, early_node_map[i].start_pfn);

	if (min_pfn == ULONG_MAX) {
		printk(KERN_WARNING
			"Could not find start_pfn for node %d\n", nid);
		return 0;
	}

	return min_pfn;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_HIGH_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	int i;
	unsigned long totalpages = 0;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long pages = early_node_map[i].end_pfn -
						early_node_map[i].start_pfn;
		totalpages += pages;
		if (pages)
			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
	}
	return totalpages;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrow the nodemask */
	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	/*
	 * If movablecore was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	find_usable_zone_for_movable();
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_active_range_index_in_nid(i, nid) {
			unsigned long start_pfn, end_pfn;
			unsigned long size_pages;

			start_pfn = max(early_node_map[i].start_pfn,
						zone_movable_pfn[nid]);
			end_pfn = early_node_map[i].end_pfn;
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_HIGH_MEMORY] = saved_node_state;
}

/* Any regular memory on that node ? */
static void check_for_regular_memory(pg_data_t *pgdat)
{
#ifdef CONFIG_HIGHMEM
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (zone->present_pages)
			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
	}
#endif
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long nid;
	int i;

	/* Sort early_node_map as initialisation assumes it is sorted */
	sort_node_map();

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}
	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes(zone_movable_pfn);

	/* Print out the zone ranges */
	printk("Zone PFN ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		printk(" %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			printk("empty\n");
		else
			printk("%0#10lx -> %0#10lx\n",
				arch_zone_lowest_possible_pfn[i],
				arch_zone_highest_possible_pfn[i]);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	printk("Movable zone start PFN for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
	}

	/* Print out the early_node_map[] */
	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
	for (i = 0; i < nr_nodemap_entries; i++)
		printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
						early_node_map[i].start_pfn,
						early_node_map[i].end_pfn);

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, NULL,
				find_min_pfn_for_node(nid), NULL);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_HIGH_MEMORY);
		check_for_regular_memory(pgdat);
	}
}

static int __init cmdline_parse_core(char *p, unsigned long *core)
{
	unsigned long long coremem;
	if (!p)
		return -EINVAL;

	coremem = memparse(p, &p);
	*core = coremem >> PAGE_SHIFT;

	/* Paranoid check that UL is enough for the coremem value */
	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

	return 0;
}

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	return cmdline_parse_core(p, &required_kernelcore);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processors event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
}

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->present_pages)
				max = zone->present_pages;
			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
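/*
 * Worked example (not from the source): with sysctl_lowmem_reserve_ratio
 * for DMA at 256 and a 1048576-page (4GB) Normal zone above it, the DMA
 * zone gets lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages, so
 * allocations that could have used Normal cannot drain the last 16MB of
 * DMA memory.
 */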

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas controls asynch page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
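/*
 * Worked example (not from the source): min_free_kbytes == 4096 on a
 * single 1GB lowmem zone gives pages_min = 4096 >> 2 = 1024 pages, so
 * WMARK_MIN = 1024, WMARK_LOW = 1024 + 256 = 1280 and
 * WMARK_HIGH = 1024 + 512 = 1536 pages.
 */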

/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
void calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->present_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}
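/*
 * Worked example (not from the source): a 4GB zone gives gb = 4 and
 * ratio = int_sqrt(40) = 6, i.e. roughly one in seven anonymous pages
 * is targeted for the inactive list.
 */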

static void __init setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_wmarks();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)
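/*
 * Worked example (not from the source): with 1GB of lowmem,
 * lowmem_kbytes = 1048576 and int_sqrt(1048576 * 16) = 4096, matching
 * the 1024MB row of the table above.
 */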

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (write)
		setup_per_zone_wmarks();
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio can only make sense
 * if in function of the boot time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_populated_zone(zone) {
		for_each_possible_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(
					per_cpu_ptr(zone->pageset, cpu), high);
		}
	}
	return 0;
}
5257
5258int hashdist = HASHDIST_DEFAULT;
5259
5260#ifdef CONFIG_NUMA
5261static int __init set_hashdist(char *str)
5262{
5263 if (!str)
5264 return 0;
5265 hashdist = simple_strtoul(str, &str, 0);
5266 return 1;
5267}
5268__setup("hashdist=", set_hashdist);
5269#endif
5270
5271
5272
5273
5274
5275
5276
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem_nopanic(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() automatically does.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
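
/*
 * A typical caller (a sketch modelled on the early inode-cache setup in
 * fs/inode.c; not a verbatim quote):
 *
 *	inode_hashtable =
 *		alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries,
 *					14,
 *					HASH_EARLY,
 *					&i_hash_shift,
 *					&i_hash_mask,
 *					0);
 *
 * Here scale == 14 requests roughly one bucket per 2^14 bytes (16KB) of
 * low memory, and limit == 0 accepts the default cap of 1/16 of memory
 * computed above.
 */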

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}

/*
 * This function checks whether a pageblock includes unmovable pages or
 * not.  If @count is not zero, it is okay for the block to include up to
 * @count unmovable pages.
 *
 * The PageLRU check without isolation or the LRU lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages; this function
 * cannot be expected to be exact.
 */
static bool
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
	unsigned long pfn, iter, found;
	/*
	 * For avoiding noisy data, lru_add_drain_all() should be called.
	 * If ZONE_MOVABLE, the zone never contains immobile pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return true;

	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
		return true;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);
		if (!page_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}
		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them
		 * too.  But for now memory offline itself doesn't call
		 * shrink_slab() and this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0; no
		 * further check is needed, this is a _used_ non-movable
		 * page.
		 *
		 * The problematic case is PG_reserved pages: PG_reserved
		 * is set on both memory hole pages and _used_ kernel pages
		 * at boot.
		 */
		if (found > count)
			return false;
	}
	return true;
}

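/*
 * Racy, lockless check used by the memory hotremove path to decide
 * whether a pageblock looks removable.  See the caveats on
 * __count_immobile_pages() above.
 */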
bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone = page_zone(page);
	return __count_immobile_pages(zone, page, 0);
}

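/*
 * Mark the pageblock containing @page MIGRATE_ISOLATE and move its free
 * pages to the isolate free list so the buddy allocator stops handing
 * them out.  Returns 0 on success, -EBUSY if the block contains pages
 * that cannot be isolated.
 */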
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier
	 * chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug doesn't call shrink_slab() by itself
	 * yet, so we only check movable pages here.
	 */
	if (__count_immobile_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages.  If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we fail.
	 */
out:
	if (!ret) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

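/*
 * Revert a pageblock from MIGRATE_ISOLATE back to MIGRATE_MOVABLE and
 * return its free pages to the movable free list.  No-op if the block
 * is not currently isolated.
 */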
void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the [start_pfn, end_pfn) range must be isolated before
 * calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      - (1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
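/*
 * Used by the hwpoison code: report whether @page is currently free in
 * the buddy allocator, possibly as part of a higher-order free page.
 */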
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif

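/*
 * Human-readable names for the page flag bits, used by dump_page_flags()
 * below when a page is dumped for debugging.
 */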
static struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
	{-1UL,				NULL		},
};

static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; pageflag_names[i].name && flags; i++) {

		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* check for left over flags */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}

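/*
 * Emergency page dump for debugging: prints the page's refcount,
 * mapcount, mapping, index and decoded flags at KERN_ALERT level.
 */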
void dump_page(struct page *page)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
	       page, atomic_read(&page->_count), page_mapcount(page),
	       page->mapping, page->index);
	dump_page_flags(page->flags);
	mem_cgroup_print_bad_page(page);
}
