/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free lists of the zoned buddy allocator, the per-cpu
 *  page lists and the pageblock migratetype bookkeeping.
 *  Note that kmalloc() lives in slab.c/slub.c.
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/nmi.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * For a CPU sitting on a memoryless node, _numa_mem_ caches the nearest
 * node that actually has memory so allocation fallbacks have a sane
 * default; _node_numa_mem_[] holds the same mapping indexed by node id.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for the global per-cpu page drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page
 * is put on a pcplist.  It avoids a pageblock bitmap lookup on the free
 * fast path, at the cost of possibly going stale if the pageblock changes
 * type while the page sits on the list.  The value is stored in
 * page->index, which is otherwise unused for pages on pcplists.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The functions below are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask so that no I/O is started while devices are
 * suspended.  To avoid races they must be called with pm_mutex held, and
 * gfp_allowed_mask itself should likewise only be modified under pm_mutex.
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * Per-zone lowmem reservation ratios, tunable via the lowmem_reserve_ratio
 * sysctl.  With the defaults of 256 for DMA/DMA32 and 32 for the higher
 * zones, allocations that could have been satisfied from a higher zone are
 * prevented from consuming more than a small fraction of a lower zone, so
 * that the precious low zones are not exhausted by fallback traffic.
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
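
/*
 * Illustrative sizing (values assume the default ratios above and a machine
 * with 16M DMA, 784M Normal and 224M HighMem): a highmem-capable allocation
 * leaves roughly 224M/32 = 7M of ZONE_NORMAL and (224M + 784M)/256 = ~4M of
 * ZONE_DMA untouched.  The actual per-zone protection[] arrays are
 * recomputed from these ratios in setup_per_zone_lowmem_reserve().
 */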

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	unsigned long max_initialise;
	unsigned long reserved_lowmem;

	/*
	 * Initialise at least 2G of a node even if it is smaller, so a
	 * reasonable amount of memory is available before the deferred
	 * initialisation kicks in.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	/*
	 * Compensate for memblock reservations (e.g. the crash kernel) in
	 * the initial estimate, so enough usable memory is actually
	 * initialised to boot.
	 */
	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
			pgdat->node_start_pfn + max_initialise);
	max_initialise += reserved_lowmem;

	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
	pgdat->first_deferred_pfn = ULONG_MAX;
}
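
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12): the first term is
 * 2UL << (30 - 12) = 524288 pages, i.e. 2G, so every node gets at least 2G
 * (plus any memblock reservations in that range) initialised synchronously
 * at boot.  On a node spanning 1TB the second term, node_spanned_pages >> 8,
 * dominates and roughly 4G worth of struct pages are set up up-front, with
 * the remainder deferred to deferred_init_memmap().
 */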

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	(*nr_initialised)++;
	if ((*nr_initialised > pgdat->static_init_size) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif


/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}
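
/*
 * Bit-layout sketch: each pageblock owns NR_PAGEBLOCK_BITS (4) consecutive
 * bits in the bitmap returned by get_pageblock_bitmap() - three encode the
 * migratetype and the fourth is the compaction skip hint.  With 64-bit
 * longs one word therefore covers 16 pageblocks, which is why the getters
 * above need only a single word load plus shift/mask, and why the setter
 * below can update a pageblock's bits with one cmpxchg() retry loop.
 */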

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}
476
477#ifdef CONFIG_DEBUG_VM
478static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
479{
480 int ret = 0;
481 unsigned seq;
482 unsigned long pfn = page_to_pfn(page);
483 unsigned long sp, start_pfn;
484
485 do {
486 seq = zone_span_seqbegin(zone);
487 start_pfn = zone->zone_start_pfn;
488 sp = zone->spanned_pages;
489 if (!zone_spans_pfn(zone, pfn))
490 ret = 1;
491 } while (zone_span_seqretry(zone, seq));
492
493 if (ret)
494 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
495 pfn, zone_to_nid(zone), zone->name,
496 start_pfn, start_pfn + sp);
497
498 return ret;
499}
500
501static int page_is_consistent(struct zone *zone, struct page *page)
502{
503 if (!pfn_valid_within(page_to_pfn(page)))
504 return 0;
505 if (zone != page_zone(page))
506 return 0;
507
508 return 1;
509}
510
511
512
513static int __maybe_unused bad_range(struct zone *zone, struct page *page)
514{
515 if (page_outside_zone_boundaries(zone, page))
516 return 1;
517 if (!page_is_consistent(zone, page))
518 return 1;
519
520 return 0;
521}
522#else
523static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
524{
525 return 0;
526}
527#endif
528
529static void bad_page(struct page *page, const char *reason,
530 unsigned long bad_flags)
531{
532 static unsigned long resume;
533 static unsigned long nr_shown;
534 static unsigned long nr_unshown;
535
536
537
538
539
540 if (nr_shown == 60) {
541 if (time_before(jiffies, resume)) {
542 nr_unshown++;
543 goto out;
544 }
545 if (nr_unshown) {
546 pr_alert(
547 "BUG: Bad page state: %lu messages suppressed\n",
548 nr_unshown);
549 nr_unshown = 0;
550 }
551 nr_shown = 0;
552 }
553 if (nr_shown++ == 0)
554 resume = jiffies + 60 * HZ;
555
556 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
557 current->comm, page_to_pfn(page));
558 __dump_page(page, reason);
559 bad_flags &= page->flags;
560 if (bad_flags)
561 pr_alert("bad because of flags: %#lx(%pGp)\n",
562 bad_flags, &bad_flags);
563 dump_page_owner(page);
564
565 print_modules();
566 dump_stack();
567out:
568
569 page_mapcount_reset(page);
570 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
571}
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588void free_compound_page(struct page *page)
589{
590 __free_pages_ok(page, compound_order(page));
591}
592
593void prep_compound_page(struct page *page, unsigned int order)
594{
595 int i;
596 int nr_pages = 1 << order;
597
598 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
599 set_compound_order(page, order);
600 __SetPageHead(page);
601 for (i = 1; i < nr_pages; i++) {
602 struct page *p = page + i;
603 set_page_count(p, 0);
604 p->mapping = TAIL_MAPPING;
605 set_compound_head(p, page);
606 }
607 atomic_set(compound_mapcount_ptr(page), -1);
608}
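
/*
 * Example: for an order-2 (4-page) compound allocation, page[0] becomes the
 * head (PageHead set, compound_order() == 2) and page[1..3] become tails
 * whose compound_head() points back at page[0]; their ->mapping is poisoned
 * with TAIL_MAPPING so that a stray dereference is caught later by
 * free_tail_pages_check().  Only the head carries the reference count; the
 * tail refcounts stay at zero.
 */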
609
610#ifdef CONFIG_DEBUG_PAGEALLOC
611unsigned int _debug_guardpage_minorder;
612bool _debug_pagealloc_enabled __read_mostly
613 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
614EXPORT_SYMBOL(_debug_pagealloc_enabled);
615bool _debug_guardpage_enabled __read_mostly;
616
617static int __init early_debug_pagealloc(char *buf)
618{
619 if (!buf)
620 return -EINVAL;
621 return kstrtobool(buf, &_debug_pagealloc_enabled);
622}
623early_param("debug_pagealloc", early_debug_pagealloc);
624
625static bool need_debug_guardpage(void)
626{
627
628 if (!debug_pagealloc_enabled())
629 return false;
630
631 if (!debug_guardpage_minorder())
632 return false;
633
634 return true;
635}
636
637static void init_debug_guardpage(void)
638{
639 if (!debug_pagealloc_enabled())
640 return;
641
642 if (!debug_guardpage_minorder())
643 return;
644
645 _debug_guardpage_enabled = true;
646}
647
648struct page_ext_operations debug_guardpage_ops = {
649 .need = need_debug_guardpage,
650 .init = init_debug_guardpage,
651};
652
653static int __init debug_guardpage_minorder_setup(char *buf)
654{
655 unsigned long res;
656
657 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
658 pr_err("Bad debug_guardpage_minorder value\n");
659 return 0;
660 }
661 _debug_guardpage_minorder = res;
662 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
663 return 0;
664}
665early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
666
667static inline bool set_page_guard(struct zone *zone, struct page *page,
668 unsigned int order, int migratetype)
669{
670 struct page_ext *page_ext;
671
672 if (!debug_guardpage_enabled())
673 return false;
674
675 if (order >= debug_guardpage_minorder())
676 return false;
677
678 page_ext = lookup_page_ext(page);
679 if (unlikely(!page_ext))
680 return false;
681
682 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
683
684 INIT_LIST_HEAD(&page->lru);
685 set_page_private(page, order);
686
687 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
688
689 return true;
690}
691
692static inline void clear_page_guard(struct zone *zone, struct page *page,
693 unsigned int order, int migratetype)
694{
695 struct page_ext *page_ext;
696
697 if (!debug_guardpage_enabled())
698 return;
699
700 page_ext = lookup_page_ext(page);
701 if (unlikely(!page_ext))
702 return;
703
704 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
705
706 set_page_private(page, 0);
707 if (!is_migrate_isolate(migratetype))
708 __mod_zone_freepage_state(zone, (1 << order), migratetype);
709}
710#else
711struct page_ext_operations debug_guardpage_ops;
712static inline bool set_page_guard(struct zone *zone, struct page *page,
713 unsigned int order, int migratetype) { return false; }
714static inline void clear_page_guard(struct zone *zone, struct page *page,
715 unsigned int order, int migratetype) {}
716#endif
717
718static inline void set_page_order(struct page *page, unsigned int order)
719{
720 set_page_private(page, order);
721 __SetPageBuddy(page);
722}
723
724static inline void rmv_page_order(struct page *page)
725{
726 __ClearPageBuddy(page);
727 set_page_private(page, 0);
728}
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745static inline int page_is_buddy(struct page *page, struct page *buddy,
746 unsigned int order)
747{
748 if (page_is_guard(buddy) && page_order(buddy) == order) {
749 if (page_zone_id(page) != page_zone_id(buddy))
750 return 0;
751
752 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
753
754 return 1;
755 }
756
757 if (PageBuddy(buddy) && page_order(buddy) == order) {
758
759
760
761
762
763 if (page_zone_id(page) != page_zone_id(buddy))
764 return 0;
765
766 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
767
768 return 1;
769 }
770 return 0;
771}
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798static inline void __free_one_page(struct page *page,
799 unsigned long pfn,
800 struct zone *zone, unsigned int order,
801 int migratetype)
802{
803 unsigned long combined_pfn;
804 unsigned long uninitialized_var(buddy_pfn);
805 struct page *buddy;
806 unsigned int max_order;
807
808 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
809
810 VM_BUG_ON(!zone_is_initialized(zone));
811 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
812
813 VM_BUG_ON(migratetype == -1);
814 if (likely(!is_migrate_isolate(migratetype)))
815 __mod_zone_freepage_state(zone, 1 << order, migratetype);
816
817 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
818 VM_BUG_ON_PAGE(bad_range(zone, page), page);
819
820continue_merging:
821 while (order < max_order - 1) {
822 buddy_pfn = __find_buddy_pfn(pfn, order);
823 buddy = page + (buddy_pfn - pfn);
824
825 if (!pfn_valid_within(buddy_pfn))
826 goto done_merging;
827 if (!page_is_buddy(page, buddy, order))
828 goto done_merging;
829
830
831
832
833 if (page_is_guard(buddy)) {
834 clear_page_guard(zone, buddy, order, migratetype);
835 } else {
836 list_del(&buddy->lru);
837 zone->free_area[order].nr_free--;
838 rmv_page_order(buddy);
839 }
840 combined_pfn = buddy_pfn & pfn;
841 page = page + (combined_pfn - pfn);
842 pfn = combined_pfn;
843 order++;
844 }
845 if (max_order < MAX_ORDER) {
846
847
848
849
850
851
852
853
854 if (unlikely(has_isolate_pageblock(zone))) {
855 int buddy_mt;
856
857 buddy_pfn = __find_buddy_pfn(pfn, order);
858 buddy = page + (buddy_pfn - pfn);
859 buddy_mt = get_pageblock_migratetype(buddy);
860
861 if (migratetype != buddy_mt
862 && (is_migrate_isolate(migratetype) ||
863 is_migrate_isolate(buddy_mt)))
864 goto done_merging;
865 }
866 max_order++;
867 goto continue_merging;
868 }
869
870done_merging:
871 set_page_order(page, order);
872
873
874
875
876
877
878
879
880
881 if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
882 struct page *higher_page, *higher_buddy;
883 combined_pfn = buddy_pfn & pfn;
884 higher_page = page + (combined_pfn - pfn);
885 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
886 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
887 if (pfn_valid_within(buddy_pfn) &&
888 page_is_buddy(higher_page, higher_buddy, order + 1)) {
889 list_add_tail(&page->lru,
890 &zone->free_area[order].free_list[migratetype]);
891 goto out;
892 }
893 }
894
895 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
896out:
897 zone->free_area[order].nr_free++;
898}
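
/*
 * Buddy arithmetic by example (MAX_ORDER == 11 assumed): the order-n buddy
 * of a block is found by flipping bit n of its pfn, i.e.
 * __find_buddy_pfn(pfn, n) == pfn ^ (1 << n).  Freeing pfn 0x14 at order 2
 * yields buddy_pfn 0x10; if that block is also free, the pair merges into
 * the order-3 block at combined_pfn = 0x14 & 0x10 = 0x10 and the loop
 * retries one order higher.  Merging past pageblock_order is only done once
 * the check above has ensured an isolated pageblock is not being merged
 * with a normal one.
 */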
899
900
901
902
903
904
905static inline bool page_expected_state(struct page *page,
906 unsigned long check_flags)
907{
908 if (unlikely(atomic_read(&page->_mapcount) != -1))
909 return false;
910
911 if (unlikely((unsigned long)page->mapping |
912 page_ref_count(page) |
913#ifdef CONFIG_MEMCG
914 (unsigned long)page->mem_cgroup |
915#endif
916 (page->flags & check_flags)))
917 return false;
918
919 return true;
920}
921
922static void free_pages_check_bad(struct page *page)
923{
924 const char *bad_reason;
925 unsigned long bad_flags;
926
927 bad_reason = NULL;
928 bad_flags = 0;
929
930 if (unlikely(atomic_read(&page->_mapcount) != -1))
931 bad_reason = "nonzero mapcount";
932 if (unlikely(page->mapping != NULL))
933 bad_reason = "non-NULL mapping";
934 if (unlikely(page_ref_count(page) != 0))
935 bad_reason = "nonzero _refcount";
936 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
937 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
938 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
939 }
940#ifdef CONFIG_MEMCG
941 if (unlikely(page->mem_cgroup))
942 bad_reason = "page still charged to cgroup";
943#endif
944 bad_page(page, bad_reason, bad_flags);
945}
946
947static inline int free_pages_check(struct page *page)
948{
949 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
950 return 0;
951
952
953 free_pages_check_bad(page);
954 return 1;
955}
956
957static int free_tail_pages_check(struct page *head_page, struct page *page)
958{
959 int ret = 1;
960
961
962
963
964
965 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
966
967 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
968 ret = 0;
969 goto out;
970 }
971 switch (page - head_page) {
972 case 1:
973
974 if (unlikely(compound_mapcount(page))) {
975 bad_page(page, "nonzero compound_mapcount", 0);
976 goto out;
977 }
978 break;
979 case 2:
980
981
982
983
984 break;
985 default:
986 if (page->mapping != TAIL_MAPPING) {
987 bad_page(page, "corrupted mapping in tail page", 0);
988 goto out;
989 }
990 break;
991 }
992 if (unlikely(!PageTail(page))) {
993 bad_page(page, "PageTail not set", 0);
994 goto out;
995 }
996 if (unlikely(compound_head(page) != head_page)) {
997 bad_page(page, "compound_head not consistent", 0);
998 goto out;
999 }
1000 ret = 0;
1001out:
1002 page->mapping = NULL;
1003 clear_compound_head(page);
1004 return ret;
1005}
1006
1007static __always_inline bool free_pages_prepare(struct page *page,
1008 unsigned int order, bool check_free)
1009{
1010 int bad = 0;
1011
1012 VM_BUG_ON_PAGE(PageTail(page), page);
1013
1014 trace_mm_page_free(page, order);
1015 kmemcheck_free_shadow(page, order);
1016
1017
1018
1019
1020
1021 if (unlikely(order)) {
1022 bool compound = PageCompound(page);
1023 int i;
1024
1025 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1026
1027 if (compound)
1028 ClearPageDoubleMap(page);
1029 for (i = 1; i < (1 << order); i++) {
1030 if (compound)
1031 bad += free_tail_pages_check(page, page + i);
1032 if (unlikely(free_pages_check(page + i))) {
1033 bad++;
1034 continue;
1035 }
1036 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1037 }
1038 }
1039 if (PageMappingFlags(page))
1040 page->mapping = NULL;
1041 if (memcg_kmem_enabled() && PageKmemcg(page))
1042 memcg_kmem_uncharge(page, order);
1043 if (check_free)
1044 bad += free_pages_check(page);
1045 if (bad)
1046 return false;
1047
1048 page_cpupid_reset_last(page);
1049 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1050 reset_page_owner(page, order);
1051
1052 if (!PageHighMem(page)) {
1053 debug_check_no_locks_freed(page_address(page),
1054 PAGE_SIZE << order);
1055 debug_check_no_obj_freed(page_address(page),
1056 PAGE_SIZE << order);
1057 }
1058 arch_free_page(page, order);
1059 kernel_poison_pages(page, 1 << order, 0);
1060 kernel_map_pages(page, 1 << order, 0);
1061 kasan_free_pages(page, order);
1062
1063 return true;
1064}
1065
1066#ifdef CONFIG_DEBUG_VM
1067static inline bool free_pcp_prepare(struct page *page)
1068{
1069 return free_pages_prepare(page, 0, true);
1070}
1071
1072static inline bool bulkfree_pcp_prepare(struct page *page)
1073{
1074 return false;
1075}
1076#else
1077static bool free_pcp_prepare(struct page *page)
1078{
1079 return free_pages_prepare(page, 0, false);
1080}
1081
1082static bool bulkfree_pcp_prepare(struct page *page)
1083{
1084 return free_pages_check(page);
1085}
1086#endif
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099static void free_pcppages_bulk(struct zone *zone, int count,
1100 struct per_cpu_pages *pcp)
1101{
1102 int migratetype = 0;
1103 int batch_free = 0;
1104 bool isolated_pageblocks;
1105
1106 spin_lock(&zone->lock);
1107 isolated_pageblocks = has_isolate_pageblock(zone);
1108
1109 while (count) {
1110 struct page *page;
1111 struct list_head *list;
1112
1113
1114
1115
1116
1117
1118
1119
1120 do {
1121 batch_free++;
1122 if (++migratetype == MIGRATE_PCPTYPES)
1123 migratetype = 0;
1124 list = &pcp->lists[migratetype];
1125 } while (list_empty(list));
1126
1127
1128 if (batch_free == MIGRATE_PCPTYPES)
1129 batch_free = count;
1130
1131 do {
1132 int mt;
1133
1134 page = list_last_entry(list, struct page, lru);
1135
1136 list_del(&page->lru);
1137
1138 mt = get_pcppage_migratetype(page);
1139
1140 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1141
1142 if (unlikely(isolated_pageblocks))
1143 mt = get_pageblock_migratetype(page);
1144
1145 if (bulkfree_pcp_prepare(page))
1146 continue;
1147
1148 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
1149 trace_mm_page_pcpu_drain(page, 0, mt);
1150 } while (--count && --batch_free && !list_empty(list));
1151 }
1152 spin_unlock(&zone->lock);
1153}
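
/*
 * Round-robin sketch: each outer iteration advances to the next per-cpu
 * list, with batch_free counting how many empty lists were skipped, and
 * then frees that many pages from the list it settled on.  When all three
 * PCP lists have pages this frees one page per list in turn; once only a
 * single list is populated, batch_free reaches MIGRATE_PCPTYPES and the
 * remaining count is drained from that list in one go, all under a single
 * zone->lock acquisition.
 */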
1154
1155static void free_one_page(struct zone *zone,
1156 struct page *page, unsigned long pfn,
1157 unsigned int order,
1158 int migratetype)
1159{
1160 spin_lock(&zone->lock);
1161 if (unlikely(has_isolate_pageblock(zone) ||
1162 is_migrate_isolate(migratetype))) {
1163 migratetype = get_pfnblock_migratetype(page, pfn);
1164 }
1165 __free_one_page(page, pfn, zone, order, migratetype);
1166 spin_unlock(&zone->lock);
1167}
1168
1169static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1170 unsigned long zone, int nid)
1171{
1172 set_page_links(page, zone, nid, pfn);
1173 init_page_count(page);
1174 page_mapcount_reset(page);
1175 page_cpupid_reset_last(page);
1176
1177 INIT_LIST_HEAD(&page->lru);
1178#ifdef WANT_PAGE_VIRTUAL
1179
1180 if (!is_highmem_idx(zone))
1181 set_page_address(page, __va(pfn << PAGE_SHIFT));
1182#endif
1183}
1184
1185static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1186 int nid)
1187{
1188 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1189}
1190
1191#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1192static void init_reserved_page(unsigned long pfn)
1193{
1194 pg_data_t *pgdat;
1195 int nid, zid;
1196
1197 if (!early_page_uninitialised(pfn))
1198 return;
1199
1200 nid = early_pfn_to_nid(pfn);
1201 pgdat = NODE_DATA(nid);
1202
1203 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1204 struct zone *zone = &pgdat->node_zones[zid];
1205
1206 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1207 break;
1208 }
1209 __init_single_pfn(pfn, zid, nid);
1210}
1211#else
1212static inline void init_reserved_page(unsigned long pfn)
1213{
1214}
1215#endif
1216
1217
1218
1219
1220
1221
1222
1223void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1224{
1225 unsigned long start_pfn = PFN_DOWN(start);
1226 unsigned long end_pfn = PFN_UP(end);
1227
1228 for (; start_pfn < end_pfn; start_pfn++) {
1229 if (pfn_valid(start_pfn)) {
1230 struct page *page = pfn_to_page(start_pfn);
1231
1232 init_reserved_page(start_pfn);
1233
1234
1235 INIT_LIST_HEAD(&page->lru);
1236
1237 SetPageReserved(page);
1238 }
1239 }
1240}
1241
1242static void __free_pages_ok(struct page *page, unsigned int order)
1243{
1244 unsigned long flags;
1245 int migratetype;
1246 unsigned long pfn = page_to_pfn(page);
1247
1248 if (!free_pages_prepare(page, order, true))
1249 return;
1250
1251 migratetype = get_pfnblock_migratetype(page, pfn);
1252 local_irq_save(flags);
1253 __count_vm_events(PGFREE, 1 << order);
1254 free_one_page(page_zone(page), page, pfn, order, migratetype);
1255 local_irq_restore(flags);
1256}
1257
1258static void __init __free_pages_boot_core(struct page *page, unsigned int order)
1259{
1260 unsigned int nr_pages = 1 << order;
1261 struct page *p = page;
1262 unsigned int loop;
1263
1264 prefetchw(p);
1265 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1266 prefetchw(p + 1);
1267 __ClearPageReserved(p);
1268 set_page_count(p, 0);
1269 }
1270 __ClearPageReserved(p);
1271 set_page_count(p, 0);
1272
1273 page_zone(page)->managed_pages += nr_pages;
1274 set_page_refcounted(page);
1275 __free_pages(page, order);
1276}
1277
1278#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1279 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1280
1281static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1282
1283int __meminit early_pfn_to_nid(unsigned long pfn)
1284{
1285 static DEFINE_SPINLOCK(early_pfn_lock);
1286 int nid;
1287
1288 spin_lock(&early_pfn_lock);
1289 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1290 if (nid < 0)
1291 nid = first_online_node;
1292 spin_unlock(&early_pfn_lock);
1293
1294 return nid;
1295}
1296#endif
1297
1298#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1299static inline bool __meminit __maybe_unused
1300meminit_pfn_in_nid(unsigned long pfn, int node,
1301 struct mminit_pfnnid_cache *state)
1302{
1303 int nid;
1304
1305 nid = __early_pfn_to_nid(pfn, state);
1306 if (nid >= 0 && nid != node)
1307 return false;
1308 return true;
1309}
1310
1311
1312static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1313{
1314 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1315}
1316
1317#else
1318
1319static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1320{
1321 return true;
1322}
1323static inline bool __meminit __maybe_unused
1324meminit_pfn_in_nid(unsigned long pfn, int node,
1325 struct mminit_pfnnid_cache *state)
1326{
1327 return true;
1328}
1329#endif
1330
1331
1332void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1333 unsigned int order)
1334{
1335 if (early_page_uninitialised(pfn))
1336 return;
1337 return __free_pages_boot_core(page, order);
1338}
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1358 unsigned long end_pfn, struct zone *zone)
1359{
1360 struct page *start_page;
1361 struct page *end_page;
1362
1363
1364 end_pfn--;
1365
1366 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1367 return NULL;
1368
1369 start_page = pfn_to_online_page(start_pfn);
1370 if (!start_page)
1371 return NULL;
1372
1373 if (page_zone(start_page) != zone)
1374 return NULL;
1375
1376 end_page = pfn_to_page(end_pfn);
1377
1378
1379 if (page_zone_id(start_page) != page_zone_id(end_page))
1380 return NULL;
1381
1382 return start_page;
1383}
1384
1385void set_zone_contiguous(struct zone *zone)
1386{
1387 unsigned long block_start_pfn = zone->zone_start_pfn;
1388 unsigned long block_end_pfn;
1389
1390 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1391 for (; block_start_pfn < zone_end_pfn(zone);
1392 block_start_pfn = block_end_pfn,
1393 block_end_pfn += pageblock_nr_pages) {
1394
1395 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1396
1397 if (!__pageblock_pfn_to_page(block_start_pfn,
1398 block_end_pfn, zone))
1399 return;
1400 }
1401
1402
1403 zone->contiguous = true;
1404}
1405
1406void clear_zone_contiguous(struct zone *zone)
1407{
1408 zone->contiguous = false;
1409}
1410
1411#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1412static void __init deferred_free_range(struct page *page,
1413 unsigned long pfn, int nr_pages)
1414{
1415 int i;
1416
1417 if (!page)
1418 return;
1419
1420
1421 if (nr_pages == pageblock_nr_pages &&
1422 (pfn & (pageblock_nr_pages - 1)) == 0) {
1423 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1424 __free_pages_boot_core(page, pageblock_order);
1425 return;
1426 }
1427
1428 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1429 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1430 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1431 __free_pages_boot_core(page, 0);
1432 }
1433}
1434
1435
1436static atomic_t pgdat_init_n_undone __initdata;
1437static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1438
1439static inline void __init pgdat_init_report_one_done(void)
1440{
1441 if (atomic_dec_and_test(&pgdat_init_n_undone))
1442 complete(&pgdat_init_all_done_comp);
1443}
1444
1445
1446static int __init deferred_init_memmap(void *data)
1447{
1448 pg_data_t *pgdat = data;
1449 int nid = pgdat->node_id;
1450 struct mminit_pfnnid_cache nid_init_state = { };
1451 unsigned long start = jiffies;
1452 unsigned long nr_pages = 0;
1453 unsigned long walk_start, walk_end;
1454 int i, zid;
1455 struct zone *zone;
1456 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1457 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1458
1459 if (first_init_pfn == ULONG_MAX) {
1460 pgdat_init_report_one_done();
1461 return 0;
1462 }
1463
1464
1465 if (!cpumask_empty(cpumask))
1466 set_cpus_allowed_ptr(current, cpumask);
1467
1468
1469 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1470 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1471 pgdat->first_deferred_pfn = ULONG_MAX;
1472
1473
1474 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1475 zone = pgdat->node_zones + zid;
1476 if (first_init_pfn < zone_end_pfn(zone))
1477 break;
1478 }
1479
1480 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1481 unsigned long pfn, end_pfn;
1482 struct page *page = NULL;
1483 struct page *free_base_page = NULL;
1484 unsigned long free_base_pfn = 0;
1485 int nr_to_free = 0;
1486
1487 end_pfn = min(walk_end, zone_end_pfn(zone));
1488 pfn = first_init_pfn;
1489 if (pfn < walk_start)
1490 pfn = walk_start;
1491 if (pfn < zone->zone_start_pfn)
1492 pfn = zone->zone_start_pfn;
1493
1494 for (; pfn < end_pfn; pfn++) {
1495 if (!pfn_valid_within(pfn))
1496 goto free_range;
1497
1498
1499
1500
1501
1502 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
1503 if (!pfn_valid(pfn)) {
1504 page = NULL;
1505 goto free_range;
1506 }
1507 }
1508
1509 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1510 page = NULL;
1511 goto free_range;
1512 }
1513
1514
1515 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
1516 page++;
1517 } else {
1518 nr_pages += nr_to_free;
1519 deferred_free_range(free_base_page,
1520 free_base_pfn, nr_to_free);
1521 free_base_page = NULL;
1522 free_base_pfn = nr_to_free = 0;
1523
1524 page = pfn_to_page(pfn);
1525 cond_resched();
1526 }
1527
1528 if (page->flags) {
1529 VM_BUG_ON(page_zone(page) != zone);
1530 goto free_range;
1531 }
1532
1533 __init_single_page(page, pfn, zid, nid);
1534 if (!free_base_page) {
1535 free_base_page = page;
1536 free_base_pfn = pfn;
1537 nr_to_free = 0;
1538 }
1539 nr_to_free++;
1540
1541
1542 continue;
1543free_range:
1544
1545 nr_pages += nr_to_free;
1546 deferred_free_range(free_base_page, free_base_pfn,
1547 nr_to_free);
1548 free_base_page = NULL;
1549 free_base_pfn = nr_to_free = 0;
1550 }
1551
1552 nr_pages += nr_to_free;
1553 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
1554
1555 first_init_pfn = max(end_pfn, first_init_pfn);
1556 }
1557
1558
1559 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1560
1561 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1562 jiffies_to_msecs(jiffies - start));
1563
1564 pgdat_init_report_one_done();
1565 return 0;
1566}
1567#endif
1568
1569void __init page_alloc_init_late(void)
1570{
1571 struct zone *zone;
1572
1573#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1574 int nid;
1575
1576
1577 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1578 for_each_node_state(nid, N_MEMORY) {
1579 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1580 }
1581
1582
1583 wait_for_completion(&pgdat_init_all_done_comp);
1584
1585
1586 files_maxfiles_init();
1587#endif
1588#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1589
1590 memblock_discard();
1591#endif
1592
1593 for_each_populated_zone(zone)
1594 set_zone_contiguous(zone);
1595}
1596
1597#ifdef CONFIG_CMA
1598
1599void __init init_cma_reserved_pageblock(struct page *page)
1600{
1601 unsigned i = pageblock_nr_pages;
1602 struct page *p = page;
1603
1604 do {
1605 __ClearPageReserved(p);
1606 set_page_count(p, 0);
1607 } while (++p, --i);
1608
1609 set_pageblock_migratetype(page, MIGRATE_CMA);
1610
1611 if (pageblock_order >= MAX_ORDER) {
1612 i = pageblock_nr_pages;
1613 p = page;
1614 do {
1615 set_page_refcounted(p);
1616 __free_pages(p, MAX_ORDER - 1);
1617 p += MAX_ORDER_NR_PAGES;
1618 } while (i -= MAX_ORDER_NR_PAGES);
1619 } else {
1620 set_page_refcounted(page);
1621 __free_pages(page, pageblock_order);
1622 }
1623
1624 adjust_managed_page_count(page, pageblock_nr_pages);
1625}
1626#endif
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642static inline void expand(struct zone *zone, struct page *page,
1643 int low, int high, struct free_area *area,
1644 int migratetype)
1645{
1646 unsigned long size = 1 << high;
1647
1648 while (high > low) {
1649 area--;
1650 high--;
1651 size >>= 1;
1652 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1653
1654
1655
1656
1657
1658
1659
1660 if (set_page_guard(zone, &page[size], high, migratetype))
1661 continue;
1662
1663 list_add(&page[size].lru, &area->free_list[migratetype]);
1664 area->nr_free++;
1665 set_page_order(&page[size], high);
1666 }
1667}
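
/*
 * Splitting sketch: serving an order-2 request from an order-5 free block
 * (32 pages) peels off the unused halves top-down - an order-4 buddy at
 * page[16], an order-3 buddy at page[8] and an order-2 buddy at page[4] -
 * each re-queued on its free list (or turned into a guard page under
 * debug_pagealloc), leaving page[0..3] for the caller.
 */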
1668
1669static void check_new_page_bad(struct page *page)
1670{
1671 const char *bad_reason = NULL;
1672 unsigned long bad_flags = 0;
1673
1674 if (unlikely(atomic_read(&page->_mapcount) != -1))
1675 bad_reason = "nonzero mapcount";
1676 if (unlikely(page->mapping != NULL))
1677 bad_reason = "non-NULL mapping";
1678 if (unlikely(page_ref_count(page) != 0))
1679 bad_reason = "nonzero _count";
1680 if (unlikely(page->flags & __PG_HWPOISON)) {
1681 bad_reason = "HWPoisoned (hardware-corrupted)";
1682 bad_flags = __PG_HWPOISON;
1683
1684 page_mapcount_reset(page);
1685 return;
1686 }
1687 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1688 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1689 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1690 }
1691#ifdef CONFIG_MEMCG
1692 if (unlikely(page->mem_cgroup))
1693 bad_reason = "page still charged to cgroup";
1694#endif
1695 bad_page(page, bad_reason, bad_flags);
1696}
1697
1698
1699
1700
1701static inline int check_new_page(struct page *page)
1702{
1703 if (likely(page_expected_state(page,
1704 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1705 return 0;
1706
1707 check_new_page_bad(page);
1708 return 1;
1709}
1710
1711static inline bool free_pages_prezeroed(void)
1712{
1713 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1714 page_poisoning_enabled();
1715}
1716
1717#ifdef CONFIG_DEBUG_VM
1718static bool check_pcp_refill(struct page *page)
1719{
1720 return false;
1721}
1722
1723static bool check_new_pcp(struct page *page)
1724{
1725 return check_new_page(page);
1726}
1727#else
1728static bool check_pcp_refill(struct page *page)
1729{
1730 return check_new_page(page);
1731}
1732static bool check_new_pcp(struct page *page)
1733{
1734 return false;
1735}
1736#endif
1737
1738static bool check_new_pages(struct page *page, unsigned int order)
1739{
1740 int i;
1741 for (i = 0; i < (1 << order); i++) {
1742 struct page *p = page + i;
1743
1744 if (unlikely(check_new_page(p)))
1745 return true;
1746 }
1747
1748 return false;
1749}
1750
1751inline void post_alloc_hook(struct page *page, unsigned int order,
1752 gfp_t gfp_flags)
1753{
1754 set_page_private(page, 0);
1755 set_page_refcounted(page);
1756
1757 arch_alloc_page(page, order);
1758 kernel_map_pages(page, 1 << order, 1);
1759 kernel_poison_pages(page, 1 << order, 1);
1760 kasan_alloc_pages(page, order);
1761 set_page_owner(page, order, gfp_flags);
1762}
1763
1764static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1765 unsigned int alloc_flags)
1766{
1767 int i;
1768
1769 post_alloc_hook(page, order, gfp_flags);
1770
1771 if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
1772 for (i = 0; i < (1 << order); i++)
1773 clear_highpage(page + i);
1774
1775 if (order && (gfp_flags & __GFP_COMP))
1776 prep_compound_page(page, order);
1777
1778
1779
1780
1781
1782
1783
1784 if (alloc_flags & ALLOC_NO_WATERMARKS)
1785 set_page_pfmemalloc(page);
1786 else
1787 clear_page_pfmemalloc(page);
1788}
1789
1790
1791
1792
1793
1794static inline
1795struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1796 int migratetype)
1797{
1798 unsigned int current_order;
1799 struct free_area *area;
1800 struct page *page;
1801
1802
1803 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1804 area = &(zone->free_area[current_order]);
1805 page = list_first_entry_or_null(&area->free_list[migratetype],
1806 struct page, lru);
1807 if (!page)
1808 continue;
1809 list_del(&page->lru);
1810 rmv_page_order(page);
1811 area->nr_free--;
1812 expand(zone, page, order, current_order, area, migratetype);
1813 set_pcppage_migratetype(page, migratetype);
1814 return page;
1815 }
1816
1817 return NULL;
1818}
1819
1820
1821
1822
1823
1824
1825static int fallbacks[MIGRATE_TYPES][4] = {
1826 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1827 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1828 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
1829#ifdef CONFIG_CMA
1830 [MIGRATE_CMA] = { MIGRATE_TYPES },
1831#endif
1832#ifdef CONFIG_MEMORY_ISOLATION
1833 [MIGRATE_ISOLATE] = { MIGRATE_TYPES },
1834#endif
1835};
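
/*
 * Example: a MIGRATE_UNMOVABLE request whose own free lists are empty will
 * try to steal from MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE; the
 * MIGRATE_TYPES entry is the sentinel that terminates the walk in
 * find_suitable_fallback().  CMA and isolated pageblocks are never used as
 * fallback sources, hence their rows contain only the sentinel.
 */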
1836
1837#ifdef CONFIG_CMA
1838static struct page *__rmqueue_cma_fallback(struct zone *zone,
1839 unsigned int order)
1840{
1841 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1842}
1843#else
1844static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1845 unsigned int order) { return NULL; }
1846#endif
1847
1848
1849
1850
1851
1852
1853static int move_freepages(struct zone *zone,
1854 struct page *start_page, struct page *end_page,
1855 int migratetype, int *num_movable)
1856{
1857 struct page *page;
1858 unsigned int order;
1859 int pages_moved = 0;
1860
1861#ifndef CONFIG_HOLES_IN_ZONE
1862
1863
1864
1865
1866
1867
1868
1869 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1870#endif
1871
1872 if (num_movable)
1873 *num_movable = 0;
1874
1875 for (page = start_page; page <= end_page;) {
1876 if (!pfn_valid_within(page_to_pfn(page))) {
1877 page++;
1878 continue;
1879 }
1880
1881
1882 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1883
1884 if (!PageBuddy(page)) {
1885
1886
1887
1888
1889
1890 if (num_movable &&
1891 (PageLRU(page) || __PageMovable(page)))
1892 (*num_movable)++;
1893
1894 page++;
1895 continue;
1896 }
1897
1898 order = page_order(page);
1899 list_move(&page->lru,
1900 &zone->free_area[order].free_list[migratetype]);
1901 page += 1 << order;
1902 pages_moved += 1 << order;
1903 }
1904
1905 return pages_moved;
1906}
1907
1908int move_freepages_block(struct zone *zone, struct page *page,
1909 int migratetype, int *num_movable)
1910{
1911 unsigned long start_pfn, end_pfn;
1912 struct page *start_page, *end_page;
1913
1914 start_pfn = page_to_pfn(page);
1915 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1916 start_page = pfn_to_page(start_pfn);
1917 end_page = start_page + pageblock_nr_pages - 1;
1918 end_pfn = start_pfn + pageblock_nr_pages - 1;
1919
1920
1921 if (!zone_spans_pfn(zone, start_pfn))
1922 start_page = page;
1923 if (!zone_spans_pfn(zone, end_pfn))
1924 return 0;
1925
1926 return move_freepages(zone, start_page, end_page, migratetype,
1927 num_movable);
1928}
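
/*
 * Alignment sketch (assuming 4K pages and pageblock_order == 9, i.e. 512
 * pages / 2M per block): a page at pfn 0x12345 lies in the pageblock
 * spanning pfns 0x12200-0x123ff, so start_pfn is rounded down with
 * ~(pageblock_nr_pages - 1) and the whole block is moved to the target
 * free list.  If the block straddles the start of the zone, the move
 * begins at the page actually passed in; if it runs past the end of the
 * zone, nothing is moved at all.
 */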
1929
1930static void change_pageblock_range(struct page *pageblock_page,
1931 int start_order, int migratetype)
1932{
1933 int nr_pageblocks = 1 << (start_order - pageblock_order);
1934
1935 while (nr_pageblocks--) {
1936 set_pageblock_migratetype(pageblock_page, migratetype);
1937 pageblock_page += pageblock_nr_pages;
1938 }
1939}
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953static bool can_steal_fallback(unsigned int order, int start_mt)
1954{
1955
1956
1957
1958
1959
1960
1961
1962 if (order >= pageblock_order)
1963 return true;
1964
1965 if (order >= pageblock_order / 2 ||
1966 start_mt == MIGRATE_RECLAIMABLE ||
1967 start_mt == MIGRATE_UNMOVABLE ||
1968 page_group_by_mobility_disabled)
1969 return true;
1970
1971 return false;
1972}
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982static void steal_suitable_fallback(struct zone *zone, struct page *page,
1983 int start_type, bool whole_block)
1984{
1985 unsigned int current_order = page_order(page);
1986 struct free_area *area;
1987 int free_pages, movable_pages, alike_pages;
1988 int old_block_type;
1989
1990 old_block_type = get_pageblock_migratetype(page);
1991
1992
1993
1994
1995
1996 if (is_migrate_highatomic(old_block_type))
1997 goto single_page;
1998
1999
2000 if (current_order >= pageblock_order) {
2001 change_pageblock_range(page, current_order, start_type);
2002 goto single_page;
2003 }
2004
2005
2006 if (!whole_block)
2007 goto single_page;
2008
2009 free_pages = move_freepages_block(zone, page, start_type,
2010 &movable_pages);
2011
2012
2013
2014
2015
2016 if (start_type == MIGRATE_MOVABLE) {
2017 alike_pages = movable_pages;
2018 } else {
2019
2020
2021
2022
2023
2024
2025
2026 if (old_block_type == MIGRATE_MOVABLE)
2027 alike_pages = pageblock_nr_pages
2028 - (free_pages + movable_pages);
2029 else
2030 alike_pages = 0;
2031 }
2032
2033
2034 if (!free_pages)
2035 goto single_page;
2036
2037
2038
2039
2040
2041 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2042 page_group_by_mobility_disabled)
2043 set_pageblock_migratetype(page, start_type);
2044
2045 return;
2046
2047single_page:
2048 area = &zone->free_area[current_order];
2049 list_move(&page->lru, &area->free_list[start_type]);
2050}
2051
2052
2053
2054
2055
2056
2057
2058int find_suitable_fallback(struct free_area *area, unsigned int order,
2059 int migratetype, bool only_stealable, bool *can_steal)
2060{
2061 int i;
2062 int fallback_mt;
2063
2064 if (area->nr_free == 0)
2065 return -1;
2066
2067 *can_steal = false;
2068 for (i = 0;; i++) {
2069 fallback_mt = fallbacks[migratetype][i];
2070 if (fallback_mt == MIGRATE_TYPES)
2071 break;
2072
2073 if (list_empty(&area->free_list[fallback_mt]))
2074 continue;
2075
2076 if (can_steal_fallback(order, migratetype))
2077 *can_steal = true;
2078
2079 if (!only_stealable)
2080 return fallback_mt;
2081
2082 if (*can_steal)
2083 return fallback_mt;
2084 }
2085
2086 return -1;
2087}
2088
2089
2090
2091
2092
2093static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2094 unsigned int alloc_order)
2095{
2096 int mt;
2097 unsigned long max_managed, flags;
2098
2099
2100
2101
2102
2103 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2104 if (zone->nr_reserved_highatomic >= max_managed)
2105 return;
2106
2107 spin_lock_irqsave(&zone->lock, flags);
2108
2109
2110 if (zone->nr_reserved_highatomic >= max_managed)
2111 goto out_unlock;
2112
2113
2114 mt = get_pageblock_migratetype(page);
2115 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2116 && !is_migrate_cma(mt)) {
2117 zone->nr_reserved_highatomic += pageblock_nr_pages;
2118 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2119 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2120 }
2121
2122out_unlock:
2123 spin_unlock_irqrestore(&zone->lock, flags);
2124}
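
/*
 * Sizing example (4K pages, pageblock_order == 9 assumed): on a zone
 * managing 4G (1048576 pages) the cap works out to 1% plus one pageblock,
 * i.e. 10485 + 512 = 10997 pages, so roughly 21 pageblocks (~43M) at most
 * are reserved for high-order atomic allocations.  The "+ pageblock_nr_pages"
 * term lets even very small zones reserve at least one block.
 */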
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2136 bool force)
2137{
2138 struct zonelist *zonelist = ac->zonelist;
2139 unsigned long flags;
2140 struct zoneref *z;
2141 struct zone *zone;
2142 struct page *page;
2143 int order;
2144 bool ret;
2145
2146 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2147 ac->nodemask) {
2148
2149
2150
2151
2152 if (!force && zone->nr_reserved_highatomic <=
2153 pageblock_nr_pages)
2154 continue;
2155
2156 spin_lock_irqsave(&zone->lock, flags);
2157 for (order = 0; order < MAX_ORDER; order++) {
2158 struct free_area *area = &(zone->free_area[order]);
2159
2160 page = list_first_entry_or_null(
2161 &area->free_list[MIGRATE_HIGHATOMIC],
2162 struct page, lru);
2163 if (!page)
2164 continue;
2165
2166
2167
2168
2169
2170
2171
2172
2173 if (is_migrate_highatomic_page(page)) {
2174
2175
2176
2177
2178
2179
2180
2181 zone->nr_reserved_highatomic -= min(
2182 pageblock_nr_pages,
2183 zone->nr_reserved_highatomic);
2184 }
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195 set_pageblock_migratetype(page, ac->migratetype);
2196 ret = move_freepages_block(zone, page, ac->migratetype,
2197 NULL);
2198 if (ret) {
2199 spin_unlock_irqrestore(&zone->lock, flags);
2200 return ret;
2201 }
2202 }
2203 spin_unlock_irqrestore(&zone->lock, flags);
2204 }
2205
2206 return false;
2207}
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219static inline bool
2220__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
2221{
2222 struct free_area *area;
2223 int current_order;
2224 struct page *page;
2225 int fallback_mt;
2226 bool can_steal;
2227
2228
2229
2230
2231
2232
2233 for (current_order = MAX_ORDER - 1; current_order >= order;
2234 --current_order) {
2235 area = &(zone->free_area[current_order]);
2236 fallback_mt = find_suitable_fallback(area, current_order,
2237 start_migratetype, false, &can_steal);
2238 if (fallback_mt == -1)
2239 continue;
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2250 && current_order > order)
2251 goto find_smallest;
2252
2253 goto do_steal;
2254 }
2255
2256 return false;
2257
2258find_smallest:
2259 for (current_order = order; current_order < MAX_ORDER;
2260 current_order++) {
2261 area = &(zone->free_area[current_order]);
2262 fallback_mt = find_suitable_fallback(area, current_order,
2263 start_migratetype, false, &can_steal);
2264 if (fallback_mt != -1)
2265 break;
2266 }
2267
2268
2269
2270
2271
2272 VM_BUG_ON(current_order == MAX_ORDER);
2273
2274do_steal:
2275 page = list_first_entry(&area->free_list[fallback_mt],
2276 struct page, lru);
2277
2278 steal_suitable_fallback(zone, page, start_migratetype, can_steal);
2279
2280 trace_mm_page_alloc_extfrag(page, order, current_order,
2281 start_migratetype, fallback_mt);
2282
2283 return true;
2284
2285}
2286
2287
2288
2289
2290
2291static struct page *__rmqueue(struct zone *zone, unsigned int order,
2292 int migratetype)
2293{
2294 struct page *page;
2295
2296retry:
2297 page = __rmqueue_smallest(zone, order, migratetype);
2298 if (unlikely(!page)) {
2299 if (migratetype == MIGRATE_MOVABLE)
2300 page = __rmqueue_cma_fallback(zone, order);
2301
2302 if (!page && __rmqueue_fallback(zone, order, migratetype))
2303 goto retry;
2304 }
2305
2306 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2307 return page;
2308}
2309
2310
2311
2312
2313
2314
2315static int rmqueue_bulk(struct zone *zone, unsigned int order,
2316 unsigned long count, struct list_head *list,
2317 int migratetype, bool cold)
2318{
2319 int i, alloced = 0;
2320
2321 spin_lock(&zone->lock);
2322 for (i = 0; i < count; ++i) {
2323 struct page *page = __rmqueue(zone, order, migratetype);
2324 if (unlikely(page == NULL))
2325 break;
2326
2327 if (unlikely(check_pcp_refill(page)))
2328 continue;
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339 if (likely(!cold))
2340 list_add(&page->lru, list);
2341 else
2342 list_add_tail(&page->lru, list);
2343 list = &page->lru;
2344 alloced++;
2345 if (is_migrate_cma(get_pcppage_migratetype(page)))
2346 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2347 -(1 << order));
2348 }
2349
2350
2351
2352
2353
2354
2355
2356 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2357 spin_unlock(&zone->lock);
2358 return alloced;
2359}
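
/*
 * Note on the list threading above: "list" is advanced to &page->lru after
 * every insertion, so pages pulled out of the buddy lists in physical order
 * also end up adjacent on the caller's per-cpu list.  Callers that allocate
 * several pages back-to-back therefore tend to receive them in increasing
 * pfn order, which helps IO request merging for devices that prefer
 * physically ordered pages.
 */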
2360
2361#ifdef CONFIG_NUMA
2362
2363
2364
2365
2366
2367
2368
2369
2370void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2371{
2372 unsigned long flags;
2373 int to_drain, batch;
2374
2375 local_irq_save(flags);
2376 batch = READ_ONCE(pcp->batch);
2377 to_drain = min(pcp->count, batch);
2378 if (to_drain > 0) {
2379 free_pcppages_bulk(zone, to_drain, pcp);
2380 pcp->count -= to_drain;
2381 }
2382 local_irq_restore(flags);
2383}
2384#endif
2385
2386
2387
2388
2389
2390
2391
2392
2393static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2394{
2395 unsigned long flags;
2396 struct per_cpu_pageset *pset;
2397 struct per_cpu_pages *pcp;
2398
2399 local_irq_save(flags);
2400 pset = per_cpu_ptr(zone->pageset, cpu);
2401
2402 pcp = &pset->pcp;
2403 if (pcp->count) {
2404 free_pcppages_bulk(zone, pcp->count, pcp);
2405 pcp->count = 0;
2406 }
2407 local_irq_restore(flags);
2408}
2409
2410
2411
2412
2413
2414
2415
2416
2417static void drain_pages(unsigned int cpu)
2418{
2419 struct zone *zone;
2420
2421 for_each_populated_zone(zone) {
2422 drain_pages_zone(cpu, zone);
2423 }
2424}
2425
2426
2427
2428
2429
2430
2431
2432void drain_local_pages(struct zone *zone)
2433{
2434 int cpu = smp_processor_id();
2435
2436 if (zone)
2437 drain_pages_zone(cpu, zone);
2438 else
2439 drain_pages(cpu);
2440}
2441
2442static void drain_local_pages_wq(struct work_struct *work)
2443{
2444
2445
2446
2447
2448
2449
2450
2451 preempt_disable();
2452 drain_local_pages(NULL);
2453 preempt_enable();
2454}
2455
2456
2457
2458
2459
2460
2461
2462
2463void drain_all_pages(struct zone *zone)
2464{
2465 int cpu;
2466
2467
2468
2469
2470
2471 static cpumask_t cpus_with_pcps;
2472
2473
2474
2475
2476
2477 if (WARN_ON_ONCE(!mm_percpu_wq))
2478 return;
2479
2480
2481 if (current->flags & PF_WQ_WORKER)
2482 return;
2483
2484
2485
2486
2487
2488
2489 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2490 if (!zone)
2491 return;
2492 mutex_lock(&pcpu_drain_mutex);
2493 }
2494
2495
2496
2497
2498
2499
2500
2501 for_each_online_cpu(cpu) {
2502 struct per_cpu_pageset *pcp;
2503 struct zone *z;
2504 bool has_pcps = false;
2505
2506 if (zone) {
2507 pcp = per_cpu_ptr(zone->pageset, cpu);
2508 if (pcp->pcp.count)
2509 has_pcps = true;
2510 } else {
2511 for_each_populated_zone(z) {
2512 pcp = per_cpu_ptr(z->pageset, cpu);
2513 if (pcp->pcp.count) {
2514 has_pcps = true;
2515 break;
2516 }
2517 }
2518 }
2519
2520 if (has_pcps)
2521 cpumask_set_cpu(cpu, &cpus_with_pcps);
2522 else
2523 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2524 }
2525
2526 for_each_cpu(cpu, &cpus_with_pcps) {
2527 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2528 INIT_WORK(work, drain_local_pages_wq);
2529 queue_work_on(cpu, mm_percpu_wq, work);
2530 }
2531 for_each_cpu(cpu, &cpus_with_pcps)
2532 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2533
2534 mutex_unlock(&pcpu_drain_mutex);
2535}
2536
2537#ifdef CONFIG_HIBERNATION
2538
2539
2540
2541
2542#define WD_PAGE_COUNT (128*1024)
2543
2544void mark_free_pages(struct zone *zone)
2545{
2546 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2547 unsigned long flags;
2548 unsigned int order, t;
2549 struct page *page;
2550
2551 if (zone_is_empty(zone))
2552 return;
2553
2554 spin_lock_irqsave(&zone->lock, flags);
2555
2556 max_zone_pfn = zone_end_pfn(zone);
2557 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2558 if (pfn_valid(pfn)) {
2559 page = pfn_to_page(pfn);
2560
2561 if (!--page_count) {
2562 touch_nmi_watchdog();
2563 page_count = WD_PAGE_COUNT;
2564 }
2565
2566 if (page_zone(page) != zone)
2567 continue;
2568
2569 if (!swsusp_page_is_forbidden(page))
2570 swsusp_unset_page_free(page);
2571 }
2572
2573 for_each_migratetype_order(order, t) {
2574 list_for_each_entry(page,
2575 &zone->free_area[order].free_list[t], lru) {
2576 unsigned long i;
2577
2578 pfn = page_to_pfn(page);
2579 for (i = 0; i < (1UL << order); i++) {
2580 if (!--page_count) {
2581 touch_nmi_watchdog();
2582 page_count = WD_PAGE_COUNT;
2583 }
2584 swsusp_set_page_free(pfn_to_page(pfn + i));
2585 }
2586 }
2587 }
2588 spin_unlock_irqrestore(&zone->lock, flags);
2589}
2590#endif
2591
2592
2593
2594
2595
2596void free_hot_cold_page(struct page *page, bool cold)
2597{
2598 struct zone *zone = page_zone(page);
2599 struct per_cpu_pages *pcp;
2600 unsigned long flags;
2601 unsigned long pfn = page_to_pfn(page);
2602 int migratetype;
2603
2604 if (!free_pcp_prepare(page))
2605 return;
2606
2607 migratetype = get_pfnblock_migratetype(page, pfn);
2608 set_pcppage_migratetype(page, migratetype);
2609 local_irq_save(flags);
2610 __count_vm_event(PGFREE);
2611
2612
2613
2614
2615
2616
2617
2618
2619 if (migratetype >= MIGRATE_PCPTYPES) {
2620 if (unlikely(is_migrate_isolate(migratetype))) {
2621 free_one_page(zone, page, pfn, 0, migratetype);
2622 goto out;
2623 }
2624 migratetype = MIGRATE_MOVABLE;
2625 }
2626
2627 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2628 if (!cold)
2629 list_add(&page->lru, &pcp->lists[migratetype]);
2630 else
2631 list_add_tail(&page->lru, &pcp->lists[migratetype]);
2632 pcp->count++;
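	/*
	 * If the per-cpu list has grown past its high watermark, return a
	 * batch of pages to the buddy allocator to keep the list bounded.
	 */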
2633 if (pcp->count >= pcp->high) {
2634 unsigned long batch = READ_ONCE(pcp->batch);
2635 free_pcppages_bulk(zone, batch, pcp);
2636 pcp->count -= batch;
2637 }
2638
2639out:
2640 local_irq_restore(flags);
2641}
2642
/*
 * Free a list of 0-order pages
 */
2646void free_hot_cold_page_list(struct list_head *list, bool cold)
2647{
2648 struct page *page, *next;
2649
2650 list_for_each_entry_safe(page, next, list, lru) {
2651 trace_mm_page_free_batched(page, cold);
2652 free_hot_cold_page(page, cold);
2653 }
2654}
2655
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
2664void split_page(struct page *page, unsigned int order)
2665{
2666 int i;
2667
2668 VM_BUG_ON_PAGE(PageCompound(page), page);
2669 VM_BUG_ON_PAGE(!page_count(page), page);
2670
2671#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
2676 if (kmemcheck_page_is_tracked(page))
2677 split_page(virt_to_page(page[0].shadow), order);
2678#endif
2679
2680 for (i = 1; i < (1 << order); i++)
2681 set_page_refcounted(page + i);
2682 split_page_owner(page, order);
2683}
2684EXPORT_SYMBOL_GPL(split_page);
2685
2686int __isolate_free_page(struct page *page, unsigned int order)
2687{
2688 unsigned long watermark;
2689 struct zone *zone;
2690 int mt;
2691
2692 BUG_ON(!PageBuddy(page));
2693
2694 zone = page_zone(page);
2695 mt = get_pageblock_migratetype(page);
2696
2697 if (!is_migrate_isolate(mt)) {
		/*
		 * Obey watermarks as if the page was being allocated. We can
		 * emulate a high-order watermark check with a raised order-0
		 * watermark, because we already know our high-order page
		 * exists.
		 */
2704 watermark = min_wmark_pages(zone) + (1UL << order);
2705 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2706 return 0;
2707
2708 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2709 }
2710
	/* Remove page from free list */
2712 list_del(&page->lru);
2713 zone->free_area[order].nr_free--;
2714 rmv_page_order(page);
2715
	/*
	 * Set the pageblock to MIGRATE_MOVABLE if the isolated page spans at
	 * least half of a pageblock, to limit fragmentation of the remainder.
	 */
2720 if (order >= pageblock_order - 1) {
2721 struct page *endpage = page + (1 << order) - 1;
2722 for (; page < endpage; page += pageblock_nr_pages) {
2723 int mt = get_pageblock_migratetype(page);
2724 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2725 && !is_migrate_highatomic(mt))
2726 set_pageblock_migratetype(page,
2727 MIGRATE_MOVABLE);
2728 }
2729 }
2730
2731
2732 return 1UL << order;
2733}
2734
/*
 * Update NUMA hit/miss statistics
 *
 * Must be called with interrupts disabled.
 */
2740static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
2741{
2742#ifdef CONFIG_NUMA
2743 enum zone_stat_item local_stat = NUMA_LOCAL;
2744
2745 if (z->node != numa_node_id())
2746 local_stat = NUMA_OTHER;
2747
2748 if (z->node == preferred_zone->node)
2749 __inc_zone_state(z, NUMA_HIT);
2750 else {
2751 __inc_zone_state(z, NUMA_MISS);
2752 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2753 }
2754 __inc_zone_state(z, local_stat);
2755#endif
2756}
2757
/* Remove page from the per-cpu list, caller must protect the list */
2759static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2760 bool cold, struct per_cpu_pages *pcp,
2761 struct list_head *list)
2762{
2763 struct page *page;
2764
2765 do {
2766 if (list_empty(list)) {
2767 pcp->count += rmqueue_bulk(zone, 0,
2768 pcp->batch, list,
2769 migratetype, cold);
2770 if (unlikely(list_empty(list)))
2771 return NULL;
2772 }
2773
2774 if (cold)
2775 page = list_last_entry(list, struct page, lru);
2776 else
2777 page = list_first_entry(list, struct page, lru);
2778
2779 list_del(&page->lru);
2780 pcp->count--;
2781 } while (check_new_pcp(page));
2782
2783 return page;
2784}
2785
/* Lock and remove page from the per-cpu list */
2787static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2788 struct zone *zone, unsigned int order,
2789 gfp_t gfp_flags, int migratetype)
2790{
2791 struct per_cpu_pages *pcp;
2792 struct list_head *list;
2793 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2794 struct page *page;
2795 unsigned long flags;
2796
2797 local_irq_save(flags);
2798 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2799 list = &pcp->lists[migratetype];
2800 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
2801 if (page) {
2802 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2803 zone_statistics(preferred_zone, zone);
2804 }
2805 local_irq_restore(flags);
2806 return page;
2807}
2808
/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
2812static inline
2813struct page *rmqueue(struct zone *preferred_zone,
2814 struct zone *zone, unsigned int order,
2815 gfp_t gfp_flags, unsigned int alloc_flags,
2816 int migratetype)
2817{
2818 unsigned long flags;
2819 struct page *page;
2820
2821 if (likely(order == 0)) {
2822 page = rmqueue_pcplist(preferred_zone, zone, order,
2823 gfp_flags, migratetype);
2824 goto out;
2825 }
2826
	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
2831 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2832 spin_lock_irqsave(&zone->lock, flags);
2833
2834 do {
2835 page = NULL;
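		/*
		 * ALLOC_HARDER callers (e.g. atomic allocations) may dip into
		 * the MIGRATE_HIGHATOMIC reserve before the regular free lists.
		 */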
2836 if (alloc_flags & ALLOC_HARDER) {
2837 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2838 if (page)
2839 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2840 }
2841 if (!page)
2842 page = __rmqueue(zone, order, migratetype);
2843 } while (page && check_new_pages(page, order));
2844 spin_unlock(&zone->lock);
2845 if (!page)
2846 goto failed;
2847 __mod_zone_freepage_state(zone, -(1 << order),
2848 get_pcppage_migratetype(page));
2849
2850 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2851 zone_statistics(preferred_zone, zone);
2852 local_irq_restore(flags);
2853
2854out:
2855 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2856 return page;
2857
2858failed:
2859 local_irq_restore(flags);
2860 return NULL;
2861}
2862
2863#ifdef CONFIG_FAIL_PAGE_ALLOC
2864
2865static struct {
2866 struct fault_attr attr;
2867
2868 bool ignore_gfp_highmem;
2869 bool ignore_gfp_reclaim;
2870 u32 min_order;
2871} fail_page_alloc = {
2872 .attr = FAULT_ATTR_INITIALIZER,
2873 .ignore_gfp_reclaim = true,
2874 .ignore_gfp_highmem = true,
2875 .min_order = 1,
2876};
2877
2878static int __init setup_fail_page_alloc(char *str)
2879{
2880 return setup_fault_attr(&fail_page_alloc.attr, str);
2881}
2882__setup("fail_page_alloc=", setup_fail_page_alloc);
2883
2884static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2885{
2886 if (order < fail_page_alloc.min_order)
2887 return false;
2888 if (gfp_mask & __GFP_NOFAIL)
2889 return false;
2890 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2891 return false;
2892 if (fail_page_alloc.ignore_gfp_reclaim &&
2893 (gfp_mask & __GFP_DIRECT_RECLAIM))
2894 return false;
2895
2896 return should_fail(&fail_page_alloc.attr, 1 << order);
2897}
2898
2899#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2900
2901static int __init fail_page_alloc_debugfs(void)
2902{
2903 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2904 struct dentry *dir;
2905
2906 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2907 &fail_page_alloc.attr);
2908 if (IS_ERR(dir))
2909 return PTR_ERR(dir);
2910
2911 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2912 &fail_page_alloc.ignore_gfp_reclaim))
2913 goto fail;
2914 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2915 &fail_page_alloc.ignore_gfp_highmem))
2916 goto fail;
2917 if (!debugfs_create_u32("min-order", mode, dir,
2918 &fail_page_alloc.min_order))
2919 goto fail;
2920
2921 return 0;
2922fail:
2923 debugfs_remove_recursive(dir);
2924
2925 return -ENOMEM;
2926}
2927
2928late_initcall(fail_page_alloc_debugfs);
2929
2930#endif
2931
2932#else
2933
2934static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2935{
2936 return false;
2937}
2938
2939#endif
2940
/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
2947bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2948 int classzone_idx, unsigned int alloc_flags,
2949 long free_pages)
2950{
2951 long min = mark;
2952 int o;
2953 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
2954
	/* free_pages may go negative - that's OK */
2956 free_pages -= (1 << order) - 1;
2957
2958 if (alloc_flags & ALLOC_HIGH)
2959 min -= min / 2;
2960
	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
	 */
2966 if (likely(!alloc_harder))
2967 free_pages -= z->nr_reserved_highatomic;
2968 else
2969 min -= min / 4;
2970
2971#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
2973 if (!(alloc_flags & ALLOC_CMA))
2974 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
2975#endif
2976
	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
	 * even if a suitable page happened to be free.
	 */
2982 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
2983 return false;
2984
	/* If this is an order-0 request then the watermark is fine */
2986 if (!order)
2987 return true;
2988
	/* For a high-order request, check at least one suitable page is free */
2990 for (o = order; o < MAX_ORDER; o++) {
2991 struct free_area *area = &z->free_area[o];
2992 int mt;
2993
2994 if (!area->nr_free)
2995 continue;
2996
2997 if (alloc_harder)
2998 return true;
2999
3000 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3001 if (!list_empty(&area->free_list[mt]))
3002 return true;
3003 }
3004
3005#ifdef CONFIG_CMA
3006 if ((alloc_flags & ALLOC_CMA) &&
3007 !list_empty(&area->free_list[MIGRATE_CMA])) {
3008 return true;
3009 }
3010#endif
3011 }
3012 return false;
3013}
3014
3015bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3016 int classzone_idx, unsigned int alloc_flags)
3017{
3018 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3019 zone_page_state(z, NR_FREE_PAGES));
3020}
3021
3022static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3023 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3024{
3025 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3026 long cma_pages = 0;
3027
3028#ifdef CONFIG_CMA
3029
3030 if (!(alloc_flags & ALLOC_CMA))
3031 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3032#endif
3033
	/*
	 * Fast check for order-0 only. If this fails then the reserves
	 * need to be calculated. There is a corner case where the check
	 * passes but only the high-order atomic reserves are free. If
	 * the caller is !atomic then it'll uselessly search the free
	 * list. That corner case is then slower but it is harmless.
	 */
3041 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3042 return true;
3043
3044 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3045 free_pages);
3046}
3047
3048bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3049 unsigned long mark, int classzone_idx)
3050{
3051 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3052
3053 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3054 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3055
3056 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3057 free_pages);
3058}
3059
3060#ifdef CONFIG_NUMA
3061static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3062{
3063 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3064 RECLAIM_DISTANCE;
3065}
3066#else
3067static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3068{
3069 return true;
3070}
3071#endif
3072
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
3077static struct page *
3078get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3079 const struct alloc_context *ac)
3080{
3081 struct zoneref *z = ac->preferred_zoneref;
3082 struct zone *zone;
3083 struct pglist_data *last_pgdat_dirty_limit = NULL;
3084
	/*
	 * Scan zonelist, looking for a zone with enough free pages.
	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
	 */
3089 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3090 ac->nodemask) {
3091 struct page *page;
3092 unsigned long mark;
3093
3094 if (cpusets_enabled() &&
3095 (alloc_flags & ALLOC_CPUSET) &&
3096 !__cpuset_zone_allowed(zone, gfp_mask))
3097 continue;
3098
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a node that is within its dirty
		 * limit, such that no single node holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the node's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-node dirty limit in the slowpath
		 * (spread_dirty_pages unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * nodes are together not big enough to reach the
		 * global limit.  The proper fix for these situations
		 * will require awareness of nodes in the
		 * dirty-throttling and the flusher threads.
		 */
3117 if (ac->spread_dirty_pages) {
3118 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3119 continue;
3120
3121 if (!node_dirty_ok(zone->zone_pgdat)) {
3122 last_pgdat_dirty_limit = zone->zone_pgdat;
3123 continue;
3124 }
3125 }
3126
3127 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
3128 if (!zone_watermark_fast(zone, order, mark,
3129 ac_classzone_idx(ac), alloc_flags)) {
3130 int ret;
3131
3132
3133 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3134 if (alloc_flags & ALLOC_NO_WATERMARKS)
3135 goto try_this_zone;
3136
3137 if (node_reclaim_mode == 0 ||
3138 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3139 continue;
3140
3141 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3142 switch (ret) {
3143 case NODE_RECLAIM_NOSCAN:
3144
3145 continue;
3146 case NODE_RECLAIM_FULL:
3147
3148 continue;
3149 default:
3150
3151 if (zone_watermark_ok(zone, order, mark,
3152 ac_classzone_idx(ac), alloc_flags))
3153 goto try_this_zone;
3154
3155 continue;
3156 }
3157 }
3158
3159try_this_zone:
3160 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3161 gfp_mask, alloc_flags, ac->migratetype);
3162 if (page) {
3163 prep_new_page(page, order, gfp_mask, alloc_flags);
			/*
			 * If this is a high-order atomic allocation then check
			 * if the pageblock should be reserved for the future.
			 */
3169 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3170 reserve_highatomic_pageblock(page, zone, order);
3171
3172 return page;
3173 }
3174 }
3175
3176 return NULL;
3177}
3178
/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
3183static inline bool should_suppress_show_mem(void)
3184{
3185 bool ret = false;
3186
3187#if NODES_SHIFT > 8
3188 ret = in_interrupt();
3189#endif
3190 return ret;
3191}
3192
3193static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3194{
3195 unsigned int filter = SHOW_MEM_FILTER_NODES;
3196 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3197
3198 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
3199 return;
3200
3201
3202
3203
3204
3205
3206 if (!(gfp_mask & __GFP_NOMEMALLOC))
3207 if (test_thread_flag(TIF_MEMDIE) ||
3208 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3209 filter &= ~SHOW_MEM_FILTER_NODES;
3210 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3211 filter &= ~SHOW_MEM_FILTER_NODES;
3212
3213 show_mem(filter, nodemask);
3214}
3215
3216void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3217{
3218 struct va_format vaf;
3219 va_list args;
3220 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3221 DEFAULT_RATELIMIT_BURST);
3222
3223 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3224 return;
3225
3226 pr_warn("%s: ", current->comm);
3227
3228 va_start(args, fmt);
3229 vaf.fmt = fmt;
3230 vaf.va = &args;
3231 pr_cont("%pV", &vaf);
3232 va_end(args);
3233
3234 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3235 if (nodemask)
3236 pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3237 else
3238 pr_cont("(null)\n");
3239
3240 cpuset_print_current_mems_allowed();
3241
3242 dump_stack();
3243 warn_alloc_show_mem(gfp_mask, nodemask);
3244}
3245
3246static inline struct page *
3247__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3248 unsigned int alloc_flags,
3249 const struct alloc_context *ac)
3250{
3251 struct page *page;
3252
3253 page = get_page_from_freelist(gfp_mask, order,
3254 alloc_flags|ALLOC_CPUSET, ac);
3255
3256
3257
3258
3259 if (!page)
3260 page = get_page_from_freelist(gfp_mask, order,
3261 alloc_flags, ac);
3262
3263 return page;
3264}
3265
3266static inline struct page *
3267__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3268 const struct alloc_context *ac, unsigned long *did_some_progress)
3269{
3270 struct oom_control oc = {
3271 .zonelist = ac->zonelist,
3272 .nodemask = ac->nodemask,
3273 .memcg = NULL,
3274 .gfp_mask = gfp_mask,
3275 .order = order,
3276 };
3277 struct page *page;
3278
3279 *did_some_progress = 0;
3280
	/*
	 * Acquire the oom lock.  If that fails, somebody else is
	 * making progress for us.
	 */
3285 if (!mutex_trylock(&oom_lock)) {
3286 *did_some_progress = 1;
3287 schedule_timeout_uninterruptible(1);
3288 return NULL;
3289 }
3290
	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing; we must fail if
	 * we're still under heavy pressure.  This attempt does not use direct
	 * reclaim, so it cannot loop on the oom_lock we already hold.
	 */
3298 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3299 ~__GFP_DIRECT_RECLAIM, order,
3300 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3301 if (page)
3302 goto out;
3303
3304
3305 if (current->flags & PF_DUMPCORE)
3306 goto out;
3307
3308 if (order > PAGE_ALLOC_COSTLY_ORDER)
3309 goto out;
3310
3311
3312
3313
3314
3315
3316 if (gfp_mask & __GFP_RETRY_MAYFAIL)
3317 goto out;
3318
3319 if (ac->high_zoneidx < ZONE_NORMAL)
3320 goto out;
3321 if (pm_suspended_storage())
3322 goto out;
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334 if (gfp_mask & __GFP_THISNODE)
3335 goto out;
3336
3337
3338 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3339 *did_some_progress = 1;
3340
3341
3342
3343
3344
3345 if (gfp_mask & __GFP_NOFAIL)
3346 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3347 ALLOC_NO_WATERMARKS, ac);
3348 }
3349out:
3350 mutex_unlock(&oom_lock);
3351 return page;
3352}
3353
/*
 * Maximum number of compaction retries with progress before the OOM
 * killer is considered the only way to move forward.
 */
3358#define MAX_COMPACT_RETRIES 16
3359
3360#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
3362static struct page *
3363__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3364 unsigned int alloc_flags, const struct alloc_context *ac,
3365 enum compact_priority prio, enum compact_result *compact_result)
3366{
3367 struct page *page;
3368 unsigned int noreclaim_flag;
3369
3370 if (!order)
3371 return NULL;
3372
3373 noreclaim_flag = memalloc_noreclaim_save();
3374 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3375 prio);
3376 memalloc_noreclaim_restore(noreclaim_flag);
3377
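	/* COMPACT_INACTIVE or lower means compaction was skipped or deferred. */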
3378 if (*compact_result <= COMPACT_INACTIVE)
3379 return NULL;
3380
3381
3382
3383
3384
3385 count_vm_event(COMPACTSTALL);
3386
3387 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3388
3389 if (page) {
3390 struct zone *zone = page_zone(page);
3391
3392 zone->compact_blockskip_flush = false;
3393 compaction_defer_reset(zone, order, true);
3394 count_vm_event(COMPACTSUCCESS);
3395 return page;
3396 }
3397
3398
3399
3400
3401
3402 count_vm_event(COMPACTFAIL);
3403
3404 cond_resched();
3405
3406 return NULL;
3407}
3408
3409static inline bool
3410should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3411 enum compact_result compact_result,
3412 enum compact_priority *compact_priority,
3413 int *compaction_retries)
3414{
3415 int max_retries = MAX_COMPACT_RETRIES;
3416 int min_priority;
3417 bool ret = false;
3418 int retries = *compaction_retries;
3419 enum compact_priority priority = *compact_priority;
3420
3421 if (!order)
3422 return false;
3423
3424 if (compaction_made_progress(compact_result))
3425 (*compaction_retries)++;
3426
3427
3428
3429
3430
3431
3432 if (compaction_failed(compact_result))
3433 goto check_priority;
3434
3435
3436
3437
3438
3439
3440
3441 if (compaction_withdrawn(compact_result)) {
3442 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3443 goto out;
3444 }
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454 if (order > PAGE_ALLOC_COSTLY_ORDER)
3455 max_retries /= 4;
3456 if (*compaction_retries <= max_retries) {
3457 ret = true;
3458 goto out;
3459 }
3460
3461
3462
3463
3464
3465check_priority:
3466 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3467 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3468
3469 if (*compact_priority > min_priority) {
3470 (*compact_priority)--;
3471 *compaction_retries = 0;
3472 ret = true;
3473 }
3474out:
3475 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3476 return ret;
3477}
3478#else
3479static inline struct page *
3480__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3481 unsigned int alloc_flags, const struct alloc_context *ac,
3482 enum compact_priority prio, enum compact_result *compact_result)
3483{
3484 *compact_result = COMPACT_SKIPPED;
3485 return NULL;
3486}
3487
3488static inline bool
3489should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3490 enum compact_result compact_result,
3491 enum compact_priority *compact_priority,
3492 int *compaction_retries)
3493{
3494 struct zone *zone;
3495 struct zoneref *z;
3496
3497 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3498 return false;
3499
3500
3501
3502
3503
3504
3505
3506 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3507 ac->nodemask) {
3508 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3509 ac_classzone_idx(ac), alloc_flags))
3510 return true;
3511 }
3512 return false;
3513}
3514#endif
3515
/* Perform direct synchronous page reclaim */
3517static int
3518__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3519 const struct alloc_context *ac)
3520{
3521 struct reclaim_state reclaim_state;
3522 int progress;
3523 unsigned int noreclaim_flag;
3524
3525 cond_resched();
3526
3527
3528 cpuset_memory_pressure_bump();
3529 noreclaim_flag = memalloc_noreclaim_save();
3530 lockdep_set_current_reclaim_state(gfp_mask);
3531 reclaim_state.reclaimed_slab = 0;
3532 current->reclaim_state = &reclaim_state;
3533
3534 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3535 ac->nodemask);
3536
3537 current->reclaim_state = NULL;
3538 lockdep_clear_current_reclaim_state();
3539 memalloc_noreclaim_restore(noreclaim_flag);
3540
3541 cond_resched();
3542
3543 return progress;
3544}
3545
/* The really slow allocator path where we enter direct reclaim */
3547static inline struct page *
3548__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3549 unsigned int alloc_flags, const struct alloc_context *ac,
3550 unsigned long *did_some_progress)
3551{
3552 struct page *page = NULL;
3553 bool drained = false;
3554
3555 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3556 if (unlikely(!(*did_some_progress)))
3557 return NULL;
3558
3559retry:
3560 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3561
3562
3563
3564
3565
3566
3567 if (!page && !drained) {
3568 unreserve_highatomic_pageblock(ac, false);
3569 drain_all_pages(NULL);
3570 drained = true;
3571 goto retry;
3572 }
3573
3574 return page;
3575}
3576
3577static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3578{
3579 struct zoneref *z;
3580 struct zone *zone;
3581 pg_data_t *last_pgdat = NULL;
3582
3583 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3584 ac->high_zoneidx, ac->nodemask) {
3585 if (last_pgdat != zone->zone_pgdat)
3586 wakeup_kswapd(zone, order, ac->high_zoneidx);
3587 last_pgdat = zone->zone_pgdat;
3588 }
3589}
3590
3591static inline unsigned int
3592gfp_to_alloc_flags(gfp_t gfp_mask)
3593{
3594 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3595
3596
3597 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
3598
3599
3600
3601
3602
3603
3604
3605 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
3606
3607 if (gfp_mask & __GFP_ATOMIC) {
3608
3609
3610
3611
3612 if (!(gfp_mask & __GFP_NOMEMALLOC))
3613 alloc_flags |= ALLOC_HARDER;
3614
3615
3616
3617
3618 alloc_flags &= ~ALLOC_CPUSET;
3619 } else if (unlikely(rt_task(current)) && !in_interrupt())
3620 alloc_flags |= ALLOC_HARDER;
3621
3622#ifdef CONFIG_CMA
3623 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3624 alloc_flags |= ALLOC_CMA;
3625#endif
3626 return alloc_flags;
3627}
3628
3629bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3630{
3631 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3632 return false;
3633
3634 if (gfp_mask & __GFP_MEMALLOC)
3635 return true;
3636 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3637 return true;
3638 if (!in_interrupt() &&
3639 ((current->flags & PF_MEMALLOC) ||
3640 unlikely(test_thread_flag(TIF_MEMDIE))))
3641 return true;
3642
3643 return false;
3644}
3645
/*
 * Checks whether it makes sense to retry the reclaim to make a forward progress
 * for the given allocation request.
 *
 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
 * without success, or when we couldn't even meet the watermark if we
 * reclaimed all remaining pages on the LRU lists.
 *
 * Returns true if a retry is viable or false to enter the oom path.
 */
3656static inline bool
3657should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3658 struct alloc_context *ac, int alloc_flags,
3659 bool did_some_progress, int *no_progress_loops)
3660{
3661 struct zone *zone;
3662 struct zoneref *z;
3663
3664
3665
3666
3667
3668
3669 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3670 *no_progress_loops = 0;
3671 else
3672 (*no_progress_loops)++;
3673
3674
3675
3676
3677
3678 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3679
3680 return unreserve_highatomic_pageblock(ac, true);
3681 }
3682
3683
3684
3685
3686
3687
3688
3689 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3690 ac->nodemask) {
3691 unsigned long available;
3692 unsigned long reclaimable;
3693 unsigned long min_wmark = min_wmark_pages(zone);
3694 bool wmark;
3695
3696 available = reclaimable = zone_reclaimable_pages(zone);
3697 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3698
3699
3700
3701
3702
3703 wmark = __zone_watermark_ok(zone, order, min_wmark,
3704 ac_classzone_idx(ac), alloc_flags, available);
3705 trace_reclaim_retry_zone(z, order, reclaimable,
3706 available, min_wmark, *no_progress_loops, wmark);
3707 if (wmark) {
3708
3709
3710
3711
3712
3713
3714 if (!did_some_progress) {
3715 unsigned long write_pending;
3716
3717 write_pending = zone_page_state_snapshot(zone,
3718 NR_ZONE_WRITE_PENDING);
3719
3720 if (2 * write_pending > reclaimable) {
3721 congestion_wait(BLK_RW_ASYNC, HZ/10);
3722 return true;
3723 }
3724 }
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735 if (current->flags & PF_WQ_WORKER)
3736 schedule_timeout_uninterruptible(1);
3737 else
3738 cond_resched();
3739
3740 return true;
3741 }
3742 }
3743
3744 return false;
3745}
3746
3747static inline bool
3748check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3749{
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761 if (cpusets_enabled() && ac->nodemask &&
3762 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3763 ac->nodemask = NULL;
3764 return true;
3765 }
3766
3767
3768
3769
3770
3771
3772
3773
3774 if (read_mems_allowed_retry(cpuset_mems_cookie))
3775 return true;
3776
3777 return false;
3778}
3779
3780static inline struct page *
3781__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3782 struct alloc_context *ac)
3783{
3784 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3785 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3786 struct page *page = NULL;
3787 unsigned int alloc_flags;
3788 unsigned long did_some_progress;
3789 enum compact_priority compact_priority;
3790 enum compact_result compact_result;
3791 int compaction_retries;
3792 int no_progress_loops;
3793 unsigned long alloc_start = jiffies;
3794 unsigned int stall_timeout = 10 * HZ;
3795 unsigned int cpuset_mems_cookie;
3796
	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
3803 if (order >= MAX_ORDER) {
3804 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
3805 return NULL;
3806 }
3807
	/*
	 * We also sanity check to catch abuse of atomic reserves being used by
	 * callers that are not in atomic context.
	 */
3812 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3813 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3814 gfp_mask &= ~__GFP_ATOMIC;
3815
3816retry_cpuset:
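	/*
	 * Reset the retry counters and compaction priority; if the cpuset's
	 * mems_allowed changes while we allocate, we come back here and start
	 * over with a fresh nodemask snapshot.
	 */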
3817 compaction_retries = 0;
3818 no_progress_loops = 0;
3819 compact_priority = DEF_COMPACT_PRIORITY;
3820 cpuset_mems_cookie = read_mems_allowed_begin();
3821
	/*
	 * The fast path uses conservative alloc_flags to succeed only until
	 * kswapd needs to be woken up, and to avoid the cost of setting up
	 * alloc_flags precisely. So we do that now.
	 */
3827 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3828
3829
3830
3831
3832
3833
3834
3835 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3836 ac->high_zoneidx, ac->nodemask);
3837 if (!ac->preferred_zoneref->zone)
3838 goto nopage;
3839
3840 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3841 wake_all_kswapds(order, ac);
3842
3843
3844
3845
3846
3847 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3848 if (page)
3849 goto got_pg;
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860 if (can_direct_reclaim &&
3861 (costly_order ||
3862 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3863 && !gfp_pfmemalloc_allowed(gfp_mask)) {
3864 page = __alloc_pages_direct_compact(gfp_mask, order,
3865 alloc_flags, ac,
3866 INIT_COMPACT_PRIORITY,
3867 &compact_result);
3868 if (page)
3869 goto got_pg;
3870
3871
3872
3873
3874
3875 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3876
3877
3878
3879
3880
3881
3882
3883
3884 if (compact_result == COMPACT_DEFERRED)
3885 goto nopage;
3886
3887
3888
3889
3890
3891
3892 compact_priority = INIT_COMPACT_PRIORITY;
3893 }
3894 }
3895
3896retry:
3897
3898 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3899 wake_all_kswapds(order, ac);
3900
3901 if (gfp_pfmemalloc_allowed(gfp_mask))
3902 alloc_flags = ALLOC_NO_WATERMARKS;
3903
	/*
	 * Reset the zonelist iterators if memory policies can be ignored.
	 * These allocations are high priority and system rather than user
	 * orientated.
	 */
3909 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
3910 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3911 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3912 ac->high_zoneidx, ac->nodemask);
3913 }
3914
3915
3916 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3917 if (page)
3918 goto got_pg;
3919
3920
3921 if (!can_direct_reclaim)
3922 goto nopage;
3923
3924
3925 if (time_after(jiffies, alloc_start + stall_timeout)) {
3926 warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
3927 "page allocation stalls for %ums, order:%u",
3928 jiffies_to_msecs(jiffies-alloc_start), order);
3929 stall_timeout += 10 * HZ;
3930 }
3931
3932
3933 if (current->flags & PF_MEMALLOC)
3934 goto nopage;
3935
3936
3937 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3938 &did_some_progress);
3939 if (page)
3940 goto got_pg;
3941
3942
3943 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3944 compact_priority, &compact_result);
3945 if (page)
3946 goto got_pg;
3947
3948
3949 if (gfp_mask & __GFP_NORETRY)
3950 goto nopage;
3951
3952
3953
3954
3955
3956 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
3957 goto nopage;
3958
3959 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3960 did_some_progress > 0, &no_progress_loops))
3961 goto retry;
3962
3963
3964
3965
3966
3967
3968
3969 if (did_some_progress > 0 &&
3970 should_compact_retry(ac, order, alloc_flags,
3971 compact_result, &compact_priority,
3972 &compaction_retries))
3973 goto retry;
3974
3975
3976
3977 if (check_retry_cpuset(cpuset_mems_cookie, ac))
3978 goto retry_cpuset;
3979
3980
3981 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3982 if (page)
3983 goto got_pg;
3984
3985
3986 if (test_thread_flag(TIF_MEMDIE) &&
3987 (alloc_flags == ALLOC_NO_WATERMARKS ||
3988 (gfp_mask & __GFP_NOMEMALLOC)))
3989 goto nopage;
3990
3991
3992 if (did_some_progress) {
3993 no_progress_loops = 0;
3994 goto retry;
3995 }
3996
3997nopage:
3998
3999 if (check_retry_cpuset(cpuset_mems_cookie, ac))
4000 goto retry_cpuset;
4001
4002
4003
4004
4005
4006 if (gfp_mask & __GFP_NOFAIL) {
4007
4008
4009
4010
4011 if (WARN_ON_ONCE(!can_direct_reclaim))
4012 goto fail;
4013
4014
4015
4016
4017
4018
4019 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4020
4021
4022
4023
4024
4025
4026
4027 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4028
4029
4030
4031
4032
4033
4034
4035 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4036 if (page)
4037 goto got_pg;
4038
4039 cond_resched();
4040 goto retry;
4041 }
4042fail:
4043 warn_alloc(gfp_mask, ac->nodemask,
4044 "page allocation failure: order:%u", order);
4045got_pg:
4046 return page;
4047}
4048
4049static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4050 int preferred_nid, nodemask_t *nodemask,
4051 struct alloc_context *ac, gfp_t *alloc_mask,
4052 unsigned int *alloc_flags)
4053{
4054 ac->high_zoneidx = gfp_zone(gfp_mask);
4055 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4056 ac->nodemask = nodemask;
4057 ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4058
4059 if (cpusets_enabled()) {
4060 *alloc_mask |= __GFP_HARDWALL;
4061 if (!ac->nodemask)
4062 ac->nodemask = &cpuset_current_mems_allowed;
4063 else
4064 *alloc_flags |= ALLOC_CPUSET;
4065 }
4066
4067 lockdep_trace_alloc(gfp_mask);
4068
4069 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4070
4071 if (should_fail_alloc_page(gfp_mask, order))
4072 return false;
4073
4074 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4075 *alloc_flags |= ALLOC_CMA;
4076
4077 return true;
4078}
4079
4080
4081static inline void finalise_ac(gfp_t gfp_mask,
4082 unsigned int order, struct alloc_context *ac)
4083{
4084
4085 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4086
4087
4088
4089
4090
4091
4092 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4093 ac->high_zoneidx, ac->nodemask);
4094}
4095
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
4099struct page *
4100__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4101 nodemask_t *nodemask)
4102{
4103 struct page *page;
4104 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4105 gfp_t alloc_mask = gfp_mask;
4106 struct alloc_context ac = { };
4107
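	/*
	 * Strip out GFP flags that are not allowed in the current context,
	 * e.g. during early boot when reclaim, IO and FS operations are not
	 * yet available.
	 */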
4108 gfp_mask &= gfp_allowed_mask;
4109 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4110 return NULL;
4111
4112 finalise_ac(gfp_mask, order, &ac);
4113
4114
4115 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4116 if (likely(page))
4117 goto out;
4118
4119
4120
4121
4122
4123
4124
4125 alloc_mask = current_gfp_context(gfp_mask);
4126 ac.spread_dirty_pages = false;
4127
4128
4129
4130
4131
4132 if (unlikely(ac.nodemask != nodemask))
4133 ac.nodemask = nodemask;
4134
4135 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4136
4137out:
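	/*
	 * For __GFP_ACCOUNT allocations, charge the page to the current
	 * memcg; if the charge fails, free the page and fail the allocation.
	 */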
4138 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4139 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4140 __free_pages(page, order);
4141 page = NULL;
4142 }
4143
4144 if (kmemcheck_enabled && page)
4145 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
4146
4147 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4148
4149 return page;
4150}
4151EXPORT_SYMBOL(__alloc_pages_nodemask);
4152
4153
4154
4155
4156unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4157{
4158 struct page *page;
4159
4160
4161
4162
4163
4164 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
4165
4166 page = alloc_pages(gfp_mask, order);
4167 if (!page)
4168 return 0;
4169 return (unsigned long) page_address(page);
4170}
4171EXPORT_SYMBOL(__get_free_pages);
4172
4173unsigned long get_zeroed_page(gfp_t gfp_mask)
4174{
4175 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4176}
4177EXPORT_SYMBOL(get_zeroed_page);
4178
4179void __free_pages(struct page *page, unsigned int order)
4180{
4181 if (put_page_testzero(page)) {
4182 if (order == 0)
4183 free_hot_cold_page(page, false);
4184 else
4185 __free_pages_ok(page, order);
4186 }
4187}
4188
4189EXPORT_SYMBOL(__free_pages);
4190
4191void free_pages(unsigned long addr, unsigned int order)
4192{
4193 if (addr != 0) {
4194 VM_BUG_ON(!virt_addr_valid((void *)addr));
4195 __free_pages(virt_to_page((void *)addr), order);
4196 }
4197}
4198
4199EXPORT_SYMBOL(free_pages);
4200
/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page.  Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simple allocation framework for
 * page fragments.  This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
4212static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4213 gfp_t gfp_mask)
4214{
4215 struct page *page = NULL;
4216 gfp_t gfp = gfp_mask;
4217
4218#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4219 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4220 __GFP_NOMEMALLOC;
4221 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4222 PAGE_FRAG_CACHE_MAX_ORDER);
4223 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4224#endif
4225 if (unlikely(!page))
4226 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4227
4228 nc->va = page ? page_address(page) : NULL;
4229
4230 return page;
4231}
4232
4233void __page_frag_cache_drain(struct page *page, unsigned int count)
4234{
4235 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4236
4237 if (page_ref_sub_and_test(page, count)) {
4238 unsigned int order = compound_order(page);
4239
4240 if (order == 0)
4241 free_hot_cold_page(page, false);
4242 else
4243 __free_pages_ok(page, order);
4244 }
4245}
4246EXPORT_SYMBOL(__page_frag_cache_drain);
4247
4248void *page_frag_alloc(struct page_frag_cache *nc,
4249 unsigned int fragsz, gfp_t gfp_mask)
4250{
4251 unsigned int size = PAGE_SIZE;
4252 struct page *page;
4253 int offset;
4254
4255 if (unlikely(!nc->va)) {
4256refill:
4257 page = __page_frag_cache_refill(nc, gfp_mask);
4258 if (!page)
4259 return NULL;
4260
4261#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4262
4263 size = nc->size;
4264#endif
4265
4266
4267
4268 page_ref_add(page, size - 1);
4269
4270
4271 nc->pfmemalloc = page_is_pfmemalloc(page);
4272 nc->pagecnt_bias = size;
4273 nc->offset = size;
4274 }
4275
4276 offset = nc->offset - fragsz;
4277 if (unlikely(offset < 0)) {
4278 page = virt_to_page(nc->va);
4279
4280 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4281 goto refill;
4282
4283#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4284
4285 size = nc->size;
4286#endif
4287
4288 set_page_count(page, size);
4289
4290
4291 nc->pagecnt_bias = size;
4292 offset = size - fragsz;
4293 }
4294
4295 nc->pagecnt_bias--;
4296 nc->offset = offset;
4297
4298 return nc->va + offset;
4299}
4300EXPORT_SYMBOL(page_frag_alloc);
4301
4302
4303
4304
4305void page_frag_free(void *addr)
4306{
4307 struct page *page = virt_to_head_page(addr);
4308
4309 if (unlikely(put_page_testzero(page)))
4310 __free_pages_ok(page, compound_order(page));
4311}
4312EXPORT_SYMBOL(page_frag_free);
4313
4314static void *make_alloc_exact(unsigned long addr, unsigned int order,
4315 size_t size)
4316{
4317 if (addr) {
4318 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4319 unsigned long used = addr + PAGE_ALIGN(size);
4320
4321 split_page(virt_to_page((void *)addr), order);
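		/* Free the tail pages beyond the requested size. */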
4322 while (used < alloc_end) {
4323 free_page(used);
4324 used += PAGE_SIZE;
4325 }
4326 }
4327 return (void *)addr;
4328}
4329
/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous
 * pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
4343void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4344{
4345 unsigned int order = get_order(size);
4346 unsigned long addr;
4347
4348 addr = __get_free_pages(gfp_mask, order);
4349 return make_alloc_exact(addr, order, size);
4350}
4351EXPORT_SYMBOL(alloc_pages_exact);
4352
/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 * pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before
 * falling back.
 */
4363void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4364{
4365 unsigned int order = get_order(size);
4366 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4367 if (!p)
4368 return NULL;
4369 return make_alloc_exact((unsigned long)page_address(p), order, size);
4370}
4371
/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
4379void free_pages_exact(void *virt, size_t size)
4380{
4381 unsigned long addr = (unsigned long)virt;
4382 unsigned long end = addr + PAGE_ALIGN(size);
4383
4384 while (addr < end) {
4385 free_page(addr);
4386 addr += PAGE_SIZE;
4387 }
4388}
4389EXPORT_SYMBOL(free_pages_exact);
4390
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the count is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 */
4401static unsigned long nr_free_zone_pages(int offset)
4402{
4403 struct zoneref *z;
4404 struct zone *zone;
4405
4406
4407 unsigned long sum = 0;
4408
4409 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4410
4411 for_each_zone_zonelist(zone, z, zonelist, offset) {
4412 unsigned long size = zone->managed_pages;
4413 unsigned long high = high_wmark_pages(zone);
4414 if (size > high)
4415 sum += size - high;
4416 }
4417
4418 return sum;
4419}
4420
/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within all zones usable for GFP_USER allocations.
 */
4427unsigned long nr_free_buffer_pages(void)
4428{
4429 return nr_free_zone_pages(gfp_zone(GFP_USER));
4430}
4431EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4432
/**
 * nr_free_pagecache_pages - count number of pages beyond high watermark
 *
 * nr_free_pagecache_pages() counts the number of pages which are beyond the
 * high watermark within all zones.
 */
4439unsigned long nr_free_pagecache_pages(void)
4440{
4441 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4442}
4443
4444static inline void show_node(struct zone *zone)
4445{
4446 if (IS_ENABLED(CONFIG_NUMA))
4447 printk("Node %d ", zone_to_nid(zone));
4448}
4449
4450long si_mem_available(void)
4451{
4452 long available;
4453 unsigned long pagecache;
4454 unsigned long wmark_low = 0;
4455 unsigned long pages[NR_LRU_LISTS];
4456 struct zone *zone;
4457 int lru;
4458
4459 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4460 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4461
4462 for_each_zone(zone)
4463 wmark_low += zone->watermark[WMARK_LOW];
4464
	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping: start from the free pages minus the
	 * kernel's reserves.
	 */
4469 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4470
	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping. Assume at least half of the page cache, or the
	 * low watermark worth of cache, needs to stay.
	 */
4476 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4477 pagecache -= min(pagecache / 2, wmark_low);
4478 available += pagecache;
4479
	/*
	 * Part of the reclaimable slab consists of items that are in use,
	 * and cannot be freed. Cap this estimate at the low watermark.
	 */
4484 available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
4485 min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
4486 wmark_low);
4487
4488 if (available < 0)
4489 available = 0;
4490 return available;
4491}
4492EXPORT_SYMBOL_GPL(si_mem_available);
4493
4494void si_meminfo(struct sysinfo *val)
4495{
4496 val->totalram = totalram_pages;
4497 val->sharedram = global_node_page_state(NR_SHMEM);
4498 val->freeram = global_page_state(NR_FREE_PAGES);
4499 val->bufferram = nr_blockdev_pages();
4500 val->totalhigh = totalhigh_pages;
4501 val->freehigh = nr_free_highpages();
4502 val->mem_unit = PAGE_SIZE;
4503}
4504
4505EXPORT_SYMBOL(si_meminfo);
4506
4507#ifdef CONFIG_NUMA
4508void si_meminfo_node(struct sysinfo *val, int nid)
4509{
4510 int zone_type;
4511 unsigned long managed_pages = 0;
4512 unsigned long managed_highpages = 0;
4513 unsigned long free_highpages = 0;
4514 pg_data_t *pgdat = NODE_DATA(nid);
4515
4516 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4517 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4518 val->totalram = managed_pages;
4519 val->sharedram = node_page_state(pgdat, NR_SHMEM);
4520 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4521#ifdef CONFIG_HIGHMEM
4522 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4523 struct zone *zone = &pgdat->node_zones[zone_type];
4524
4525 if (is_highmem(zone)) {
4526 managed_highpages += zone->managed_pages;
4527 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4528 }
4529 }
4530 val->totalhigh = managed_highpages;
4531 val->freehigh = free_highpages;
4532#else
4533 val->totalhigh = managed_highpages;
4534 val->freehigh = free_highpages;
4535#endif
4536 val->mem_unit = PAGE_SIZE;
4537}
4538#endif
4539
/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
4544static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4545{
4546 if (!(flags & SHOW_MEM_FILTER_NODES))
4547 return false;
4548
4549
4550
4551
4552
4553
4554 if (!nodemask)
4555 nodemask = &cpuset_current_mems_allowed;
4556
4557 return !node_isset(nid, *nodemask);
4558}
4559
4560#define K(x) ((x) << (PAGE_SHIFT-10))
4561
4562static void show_migration_types(unsigned char type)
4563{
4564 static const char types[MIGRATE_TYPES] = {
4565 [MIGRATE_UNMOVABLE] = 'U',
4566 [MIGRATE_MOVABLE] = 'M',
4567 [MIGRATE_RECLAIMABLE] = 'E',
4568 [MIGRATE_HIGHATOMIC] = 'H',
4569#ifdef CONFIG_CMA
4570 [MIGRATE_CMA] = 'C',
4571#endif
4572#ifdef CONFIG_MEMORY_ISOLATION
4573 [MIGRATE_ISOLATE] = 'I',
4574#endif
4575 };
4576 char tmp[MIGRATE_TYPES + 1];
4577 char *p = tmp;
4578 int i;
4579
4580 for (i = 0; i < MIGRATE_TYPES; i++) {
4581 if (type & (1 << i))
4582 *p++ = types[i];
4583 }
4584
4585 *p = '\0';
4586 printk(KERN_CONT "(%s) ", tmp);
4587}
4588
/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
4598void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4599{
4600 unsigned long free_pcp = 0;
4601 int cpu;
4602 struct zone *zone;
4603 pg_data_t *pgdat;
4604
4605 for_each_populated_zone(zone) {
4606 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4607 continue;
4608
4609 for_each_online_cpu(cpu)
4610 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4611 }
4612
4613 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4614 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
4615 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4616 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4617 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4618 " free:%lu free_pcp:%lu free_cma:%lu\n",
4619 global_node_page_state(NR_ACTIVE_ANON),
4620 global_node_page_state(NR_INACTIVE_ANON),
4621 global_node_page_state(NR_ISOLATED_ANON),
4622 global_node_page_state(NR_ACTIVE_FILE),
4623 global_node_page_state(NR_INACTIVE_FILE),
4624 global_node_page_state(NR_ISOLATED_FILE),
4625 global_node_page_state(NR_UNEVICTABLE),
4626 global_node_page_state(NR_FILE_DIRTY),
4627 global_node_page_state(NR_WRITEBACK),
4628 global_node_page_state(NR_UNSTABLE_NFS),
4629 global_node_page_state(NR_SLAB_RECLAIMABLE),
4630 global_node_page_state(NR_SLAB_UNRECLAIMABLE),
4631 global_node_page_state(NR_FILE_MAPPED),
4632 global_node_page_state(NR_SHMEM),
4633 global_page_state(NR_PAGETABLE),
4634 global_page_state(NR_BOUNCE),
4635 global_page_state(NR_FREE_PAGES),
4636 free_pcp,
4637 global_page_state(NR_FREE_CMA_PAGES));
4638
4639 for_each_online_pgdat(pgdat) {
4640 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
4641 continue;
4642
4643 printk("Node %d"
4644 " active_anon:%lukB"
4645 " inactive_anon:%lukB"
4646 " active_file:%lukB"
4647 " inactive_file:%lukB"
4648 " unevictable:%lukB"
4649 " isolated(anon):%lukB"
4650 " isolated(file):%lukB"
4651 " mapped:%lukB"
4652 " dirty:%lukB"
4653 " writeback:%lukB"
4654 " shmem:%lukB"
4655#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4656 " shmem_thp: %lukB"
4657 " shmem_pmdmapped: %lukB"
4658 " anon_thp: %lukB"
4659#endif
4660 " writeback_tmp:%lukB"
4661 " unstable:%lukB"
4662 " all_unreclaimable? %s"
4663 "\n",
4664 pgdat->node_id,
4665 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4666 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4667 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4668 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4669 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4670 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4671 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
4672 K(node_page_state(pgdat, NR_FILE_MAPPED)),
4673 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4674 K(node_page_state(pgdat, NR_WRITEBACK)),
4675 K(node_page_state(pgdat, NR_SHMEM)),
4676#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4677 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4678 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4679 * HPAGE_PMD_NR),
4680 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4681#endif
4682 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4683 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4684 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4685 "yes" : "no");
4686 }
4687
4688 for_each_populated_zone(zone) {
4689 int i;
4690
4691 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4692 continue;
4693
4694 free_pcp = 0;
4695 for_each_online_cpu(cpu)
4696 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4697
4698 show_node(zone);
4699 printk(KERN_CONT
4700 "%s"
4701 " free:%lukB"
4702 " min:%lukB"
4703 " low:%lukB"
4704 " high:%lukB"
4705 " active_anon:%lukB"
4706 " inactive_anon:%lukB"
4707 " active_file:%lukB"
4708 " inactive_file:%lukB"
4709 " unevictable:%lukB"
4710 " writepending:%lukB"
4711 " present:%lukB"
4712 " managed:%lukB"
4713 " mlocked:%lukB"
4714 " kernel_stack:%lukB"
4715 " pagetables:%lukB"
4716 " bounce:%lukB"
4717 " free_pcp:%lukB"
4718 " local_pcp:%ukB"
4719 " free_cma:%lukB"
4720 "\n",
4721 zone->name,
4722 K(zone_page_state(zone, NR_FREE_PAGES)),
4723 K(min_wmark_pages(zone)),
4724 K(low_wmark_pages(zone)),
4725 K(high_wmark_pages(zone)),
4726 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4727 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4728 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4729 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4730 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
4731 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
4732 K(zone->present_pages),
4733 K(zone->managed_pages),
4734 K(zone_page_state(zone, NR_MLOCK)),
4735 zone_page_state(zone, NR_KERNEL_STACK_KB),
4736 K(zone_page_state(zone, NR_PAGETABLE)),
4737 K(zone_page_state(zone, NR_BOUNCE)),
4738 K(free_pcp),
4739 K(this_cpu_read(zone->pageset->pcp.count)),
4740 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
4741 printk("lowmem_reserve[]:");
4742 for (i = 0; i < MAX_NR_ZONES; i++)
4743 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4744 printk(KERN_CONT "\n");
4745 }
4746
4747 for_each_populated_zone(zone) {
4748 unsigned int order;
4749 unsigned long nr[MAX_ORDER], flags, total = 0;
4750 unsigned char types[MAX_ORDER];
4751
4752 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4753 continue;
4754 show_node(zone);
4755 printk(KERN_CONT "%s: ", zone->name);
4756
4757 spin_lock_irqsave(&zone->lock, flags);
4758 for (order = 0; order < MAX_ORDER; order++) {
4759 struct free_area *area = &zone->free_area[order];
4760 int type;
4761
4762 nr[order] = area->nr_free;
4763 total += nr[order] << order;
4764
4765 types[order] = 0;
4766 for (type = 0; type < MIGRATE_TYPES; type++) {
4767 if (!list_empty(&area->free_list[type]))
4768 types[order] |= 1 << type;
4769 }
4770 }
4771 spin_unlock_irqrestore(&zone->lock, flags);
4772 for (order = 0; order < MAX_ORDER; order++) {
4773 printk(KERN_CONT "%lu*%lukB ",
4774 nr[order], K(1UL) << order);
4775 if (nr[order])
4776 show_migration_types(types[order]);
4777 }
4778 printk(KERN_CONT "= %lukB\n", K(total));
4779 }
4780
4781 hugetlb_show_meminfo();
4782
4783 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
4784
4785 show_swap_cache_info();
4786}
4787
4788static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4789{
4790 zoneref->zone = zone;
4791 zoneref->zone_idx = zone_idx(zone);
4792}
4793
/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
4799static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
4800 int nr_zones)
4801{
4802 struct zone *zone;
4803 enum zone_type zone_type = MAX_NR_ZONES;
4804
4805 do {
4806 zone_type--;
4807 zone = pgdat->node_zones + zone_type;
4808 if (managed_zone(zone)) {
4809 zoneref_set_zone(zone,
4810 &zonelist->_zonerefs[nr_zones++]);
4811 check_highest_zone(zone_type);
4812 }
4813 } while (zone_type);
4814
4815 return nr_zones;
4816}
4817
/*
 *  zonelist_order:
 *  0 = automatic detection of better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 *  the same zonelist. So only NUMA can configure this param.
 */
4828#define ZONELIST_ORDER_DEFAULT 0
4829#define ZONELIST_ORDER_NODE 1
4830#define ZONELIST_ORDER_ZONE 2
4831
4832
4833
4834
4835static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4836static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4837
4838
4839#ifdef CONFIG_NUMA
4840
4841static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4842
4843#define NUMA_ZONELIST_ORDER_LEN 16
4844char numa_zonelist_order[16] = "default";
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854static int __parse_numa_zonelist_order(char *s)
4855{
4856 if (*s == 'd' || *s == 'D') {
4857 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4858 } else if (*s == 'n' || *s == 'N') {
4859 user_zonelist_order = ZONELIST_ORDER_NODE;
4860 } else if (*s == 'z' || *s == 'Z') {
4861 user_zonelist_order = ZONELIST_ORDER_ZONE;
4862 } else {
4863 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
4864 return -EINVAL;
4865 }
4866 return 0;
4867}
4868
4869static __init int setup_numa_zonelist_order(char *s)
4870{
4871 int ret;
4872
4873 if (!s)
4874 return 0;
4875
4876 ret = __parse_numa_zonelist_order(s);
4877 if (ret == 0)
4878 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4879
4880 return ret;
4881}
4882early_param("numa_zonelist_order", setup_numa_zonelist_order);
4883
4884
4885
4886
4887int numa_zonelist_order_handler(struct ctl_table *table, int write,
4888 void __user *buffer, size_t *length,
4889 loff_t *ppos)
4890{
4891 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4892 int ret;
4893 static DEFINE_MUTEX(zl_order_mutex);
4894
4895 mutex_lock(&zl_order_mutex);
4896 if (write) {
4897 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4898 ret = -EINVAL;
4899 goto out;
4900 }
4901 strcpy(saved_string, (char *)table->data);
4902 }
4903 ret = proc_dostring(table, write, buffer, length, ppos);
4904 if (ret)
4905 goto out;
4906 if (write) {
4907 int oldval = user_zonelist_order;
4908
4909 ret = __parse_numa_zonelist_order((char *)table->data);
4910 if (ret) {
4911
4912
4913
4914 strncpy((char *)table->data, saved_string,
4915 NUMA_ZONELIST_ORDER_LEN);
4916 user_zonelist_order = oldval;
4917 } else if (oldval != user_zonelist_order) {
4918 mem_hotplug_begin();
4919 mutex_lock(&zonelists_mutex);
4920 build_all_zonelists(NULL, NULL);
4921 mutex_unlock(&zonelists_mutex);
4922 mem_hotplug_done();
4923 }
4924 }
4925out:
4926 mutex_unlock(&zl_order_mutex);
4927 return ret;
4928}
4929
4930
4931#define MAX_NODE_LOAD (nr_online_nodes)
4932static int node_load[MAX_NUMNODES];
4933
/**
 * find_next_best_node - find the next node that should appear in a
 * given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns NUMA_NO_NODE if no node is found.
 */
4948static int find_next_best_node(int node, nodemask_t *used_node_mask)
4949{
4950 int n, val;
4951 int min_val = INT_MAX;
4952 int best_node = NUMA_NO_NODE;
4953 const struct cpumask *tmp = cpumask_of_node(0);
4954
4955
4956 if (!node_isset(node, *used_node_mask)) {
4957 node_set(node, *used_node_mask);
4958 return node;
4959 }
4960
4961 for_each_node_state(n, N_MEMORY) {
4962
4963
4964 if (node_isset(n, *used_node_mask))
4965 continue;
4966
4967
4968 val = node_distance(node, n);
4969
4970
4971 val += (n < node);
4972
4973
4974 tmp = cpumask_of_node(n);
4975 if (!cpumask_empty(tmp))
4976 val += PENALTY_FOR_NODE_WITH_CPUS;
4977
4978
4979 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4980 val += node_load[n];
4981
4982 if (val < min_val) {
4983 min_val = val;
4984 best_node = n;
4985 }
4986 }
4987
4988 if (best_node >= 0)
4989 node_set(best_node, *used_node_mask);
4990
4991 return best_node;
4992}
4993
4994
4995
4996
4997
4998
4999
5000static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
5001{
5002 int j;
5003 struct zonelist *zonelist;
5004
5005 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5006 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
5007 ;
5008 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5009 zonelist->_zonerefs[j].zone = NULL;
5010 zonelist->_zonerefs[j].zone_idx = 0;
5011}
5012
5013
5014
5015
5016static void build_thisnode_zonelists(pg_data_t *pgdat)
5017{
5018 int j;
5019 struct zonelist *zonelist;
5020
5021 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
5022 j = build_zonelists_node(pgdat, zonelist, 0);
5023 zonelist->_zonerefs[j].zone = NULL;
5024 zonelist->_zonerefs[j].zone_idx = 0;
5025}
5026
5027
5028
5029
5030
5031
5032
5033static int node_order[MAX_NUMNODES];
5034
5035static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
5036{
5037 int pos, j, node;
5038 int zone_type;
5039 struct zone *z;
5040 struct zonelist *zonelist;
5041
5042 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5043 pos = 0;
5044 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
5045 for (j = 0; j < nr_nodes; j++) {
5046 node = node_order[j];
5047 z = &NODE_DATA(node)->node_zones[zone_type];
5048 if (managed_zone(z)) {
5049 zoneref_set_zone(z,
5050 &zonelist->_zonerefs[pos++]);
5051 check_highest_zone(zone_type);
5052 }
5053 }
5054 }
5055 zonelist->_zonerefs[pos].zone = NULL;
5056 zonelist->_zonerefs[pos].zone_idx = 0;
5057}
5058
5059#if defined(CONFIG_64BIT)
5060
5061
5062
5063
5064
5065static int default_zonelist_order(void)
5066{
5067 return ZONELIST_ORDER_NODE;
5068}
5069#else
5070
5071
5072
5073
5074
5075
5076
5077
5078static int default_zonelist_order(void)
5079{
5080 return ZONELIST_ORDER_ZONE;
5081}
5082#endif
5083
5084static void set_zonelist_order(void)
5085{
5086 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
5087 current_zonelist_order = default_zonelist_order();
5088 else
5089 current_zonelist_order = user_zonelist_order;
5090}
5091
5092static void build_zonelists(pg_data_t *pgdat)
5093{
5094 int i, node, load;
5095 nodemask_t used_mask;
5096 int local_node, prev_node;
5097 struct zonelist *zonelist;
5098 unsigned int order = current_zonelist_order;
5099
5100
5101 for (i = 0; i < MAX_ZONELISTS; i++) {
5102 zonelist = pgdat->node_zonelists + i;
5103 zonelist->_zonerefs[0].zone = NULL;
5104 zonelist->_zonerefs[0].zone_idx = 0;
5105 }
5106
5107
5108 local_node = pgdat->node_id;
5109 load = nr_online_nodes;
5110 prev_node = local_node;
5111 nodes_clear(used_mask);
5112
5113 memset(node_order, 0, sizeof(node_order));
5114 i = 0;
5115
5116 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5117
5118
5119
5120
5121
5122 if (node_distance(local_node, node) !=
5123 node_distance(local_node, prev_node))
5124 node_load[node] = load;
5125
5126 prev_node = node;
5127 load--;
5128 if (order == ZONELIST_ORDER_NODE)
5129 build_zonelists_in_node_order(pgdat, node);
5130 else
5131 node_order[i++] = node;
5132 }
5133
5134 if (order == ZONELIST_ORDER_ZONE) {
5135
5136 build_zonelists_in_zone_order(pgdat, i);
5137 }
5138
5139 build_thisnode_zonelists(pgdat);
5140}
5141
5142#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5143
5144
5145
5146
5147
5148
5149int local_memory_node(int node)
5150{
5151 struct zoneref *z;
5152
5153 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5154 gfp_zone(GFP_KERNEL),
5155 NULL);
5156 return z->zone->node;
5157}
5158#endif
5159
5160static void setup_min_unmapped_ratio(void);
5161static void setup_min_slab_ratio(void);
5162#else
5163
5164static void set_zonelist_order(void)
5165{
5166 current_zonelist_order = ZONELIST_ORDER_ZONE;
5167}
5168
5169static void build_zonelists(pg_data_t *pgdat)
5170{
5171 int node, local_node;
5172 enum zone_type j;
5173 struct zonelist *zonelist;
5174
5175 local_node = pgdat->node_id;
5176
5177 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5178 j = build_zonelists_node(pgdat, zonelist, 0);
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5189 if (!node_online(node))
5190 continue;
5191 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5192 }
5193 for (node = 0; node < local_node; node++) {
5194 if (!node_online(node))
5195 continue;
5196 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5197 }
5198
5199 zonelist->_zonerefs[j].zone = NULL;
5200 zonelist->_zonerefs[j].zone_idx = 0;
5201}
5202
5203#endif
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
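/*
 * Boot pagesets: one per cpu, used for all zones and nodes while
 * bootstrapping, before the real per-zone pagesets are allocated.
 * setup_pageset() is called on them with a batch of zero, which yields
 * high == 0 and batch == 1, so any page freed to a boot pageset is
 * immediately drained back to the buddy lists.  They must be kept even
 * after boot is complete, for not-yet-onlined and hotplugged CPUs.
 */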
5220static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5221static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5222static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5223static void setup_zone_pageset(struct zone *zone);
5224
5225
5226
5227
5228
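/*
 * Global mutex to protect against size modification of zonelists
 * as well as to serialize pageset setup for a newly populated zone.
 */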
5229DEFINE_MUTEX(zonelists_mutex);
5230
5231
5232static int __build_all_zonelists(void *data)
5233{
5234 int nid;
5235 int cpu;
5236 pg_data_t *self = data;
5237
5238#ifdef CONFIG_NUMA
5239 memset(node_load, 0, sizeof(node_load));
5240#endif
5241
5242 if (self && !node_online(self->node_id)) {
5243 build_zonelists(self);
5244 }
5245
5246 for_each_online_node(nid) {
5247 pg_data_t *pgdat = NODE_DATA(nid);
5248
5249 build_zonelists(pgdat);
5250 }
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265 for_each_possible_cpu(cpu) {
5266 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5267
5268#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5269
5270
5271
5272
5273
5274
5275
5276
5277 if (cpu_online(cpu))
5278 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5279#endif
5280 }
5281
5282 return 0;
5283}
5284
5285static noinline void __init
5286build_all_zonelists_init(void)
5287{
5288 __build_all_zonelists(NULL);
5289 mminit_verify_zonelist();
5290 cpuset_init_current_mems_allowed();
5291}
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301
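/*
 * Called with zonelists_mutex held, except while system_state is
 * SYSTEM_BOOTING.
 *
 * __ref because it calls the __meminit setup_zone_pageset() (only reached
 * with a non-NULL zone via memory hotplug) and the __init
 * build_all_zonelists_init() (only reached while SYSTEM_BOOTING).
 */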
5302void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
5303{
5304 set_zonelist_order();
5305
5306 if (system_state == SYSTEM_BOOTING) {
5307 build_all_zonelists_init();
5308 } else {
5309#ifdef CONFIG_MEMORY_HOTPLUG
5310 if (zone)
5311 setup_zone_pageset(zone);
5312#endif
5313
5314
5315 stop_machine_cpuslocked(__build_all_zonelists, pgdat, NULL);
5316
5317 }
5318 vm_total_pages = nr_free_pagecache_pages();
5319
5320
5321
5322
5323
5324
5325
5326 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5327 page_group_by_mobility_disabled = 1;
5328 else
5329 page_group_by_mobility_disabled = 0;
5330
5331 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
5332 nr_online_nodes,
5333 zonelist_order_name[current_zonelist_order],
5334 page_group_by_mobility_disabled ? "off" : "on",
5335 vm_total_pages);
5336#ifdef CONFIG_NUMA
5337 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5338#endif
5339}
5340
5341
5342
5343
5344
5345
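/*
 * Initially all pages are reserved - free ones are freed up by
 * free_all_bootmem() once the early boot process is done.
 * Non-atomic initialization, single-pass.
 */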
5346void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5347 unsigned long start_pfn, enum memmap_context context)
5348{
5349 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
5350 unsigned long end_pfn = start_pfn + size;
5351 pg_data_t *pgdat = NODE_DATA(nid);
5352 unsigned long pfn;
5353 unsigned long nr_initialised = 0;
5354#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5355 struct memblock_region *r = NULL, *tmp;
5356#endif
5357
5358 if (highest_memmap_pfn < end_pfn - 1)
5359 highest_memmap_pfn = end_pfn - 1;
5360
5361
5362
5363
5364
5365 if (altmap && start_pfn == altmap->base_pfn)
5366 start_pfn += altmap->reserve;
5367
5368 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5369
5370
5371
5372
5373 if (context != MEMMAP_EARLY)
5374 goto not_early;
5375
5376 if (!early_pfn_valid(pfn)) {
5377#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5378
5379
5380
5381
5382
5383 pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5384#endif
5385 continue;
5386 }
5387 if (!early_pfn_in_nid(pfn, nid))
5388 continue;
5389 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5390 break;
5391
5392#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5393
5394
5395
5396
5397
5398 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5399 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5400 for_each_memblock(memory, tmp)
5401 if (pfn < memblock_region_memory_end_pfn(tmp))
5402 break;
5403 r = tmp;
5404 }
5405 if (pfn >= memblock_region_memory_base_pfn(r) &&
5406 memblock_is_mirror(r)) {
5407
5408 pfn = memblock_region_memory_end_pfn(r);
5409 continue;
5410 }
5411 }
5412#endif
5413
5414not_early:
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
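  /*
   * Mark the block movable so that blocks are reserved for movable
   * pages at startup.  This forces kernel allocations to reserve
   * their blocks rather than leaking throughout the address space
   * during boot, when many long-lived kernel allocations are made.
   * Only the first pfn of each pageblock needs to set the
   * migratetype in the pageblock bitmap.
   */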
5427 if (!(pfn & (pageblock_nr_pages - 1))) {
5428 struct page *page = pfn_to_page(pfn);
5429
5430 __init_single_page(page, pfn, zone, nid);
5431 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5432 } else {
5433 __init_single_pfn(pfn, zone, nid);
5434 }
5435 }
5436}
5437
5438static void __meminit zone_init_free_lists(struct zone *zone)
5439{
5440 unsigned int order, t;
5441 for_each_migratetype_order(order, t) {
5442 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5443 zone->free_area[order].nr_free = 0;
5444 }
5445}
5446
5447#ifndef __HAVE_ARCH_MEMMAP_INIT
5448#define memmap_init(size, nid, zone, start_pfn) \
5449 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
5450#endif
5451
5452static int zone_batchsize(struct zone *zone)
5453{
5454#ifdef CONFIG_MMU
5455 int batch;
5456
5457
5458
5459
5460
5461
5462
5463 batch = zone->managed_pages / 1024;
5464 if (batch * PAGE_SIZE > 512 * 1024)
5465 batch = (512 * 1024) / PAGE_SIZE;
5466 batch /= 4;
5467 if (batch < 1)
5468 batch = 1;
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5481
5482 return batch;
5483
5484#else
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498 return 0;
5499#endif
5500}
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514
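/*
 * pcp->high and pcp->batch values are related and dependent on one another:
 * ->batch must never be higher than ->high.
 * The function below updates them in a safe manner without read-side
 * locking: batch is first dropped to 1 so that, whatever order a reader
 * observes the stores in, it never sees a batch larger than high.
 *
 * Holding pcp_batch_high_lock is required when calling this function
 * outside of boot time (or some other assurance that no concurrent
 * updaters exist).
 */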
5515static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5516 unsigned long batch)
5517{
5518
5519 pcp->batch = 1;
5520 smp_wmb();
5521
5522
5523 pcp->high = high;
5524 smp_wmb();
5525
5526 pcp->batch = batch;
5527}
5528
5529
5530static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5531{
5532 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5533}
5534
5535static void pageset_init(struct per_cpu_pageset *p)
5536{
5537 struct per_cpu_pages *pcp;
5538 int migratetype;
5539
5540 memset(p, 0, sizeof(*p));
5541
5542 pcp = &p->pcp;
5543 pcp->count = 0;
5544 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5545 INIT_LIST_HEAD(&pcp->lists[migratetype]);
5546}
5547
5548static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5549{
5550 pageset_init(p);
5551 pageset_set_batch(p, batch);
5552}
5553
5554
5555
5556
5557
5558static void pageset_set_high(struct per_cpu_pageset *p,
5559 unsigned long high)
5560{
5561 unsigned long batch = max(1UL, high / 4);
5562 if ((high / 4) > (PAGE_SHIFT * 8))
5563 batch = PAGE_SHIFT * 8;
5564
5565 pageset_update(&p->pcp, high, batch);
5566}
5567
5568static void pageset_set_high_and_batch(struct zone *zone,
5569 struct per_cpu_pageset *pcp)
5570{
5571 if (percpu_pagelist_fraction)
5572 pageset_set_high(pcp,
5573 (zone->managed_pages /
5574 percpu_pagelist_fraction));
5575 else
5576 pageset_set_batch(pcp, zone_batchsize(zone));
5577}
5578
5579static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5580{
5581 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5582
5583 pageset_init(pcp);
5584 pageset_set_high_and_batch(zone, pcp);
5585}
5586
5587static void __meminit setup_zone_pageset(struct zone *zone)
5588{
5589 int cpu;
5590 zone->pageset = alloc_percpu(struct per_cpu_pageset);
5591 for_each_possible_cpu(cpu)
5592 zone_pageset_init(zone, cpu);
5593}
5594
5595
5596
5597
5598
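/*
 * Allocate per-cpu pagesets and per-node stats and initialize them.
 * Before this call only the boot pagesets were available.
 */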
5599void __init setup_per_cpu_pageset(void)
5600{
5601 struct pglist_data *pgdat;
5602 struct zone *zone;
5603
5604 for_each_populated_zone(zone)
5605 setup_zone_pageset(zone);
5606
5607 for_each_online_pgdat(pgdat)
5608 pgdat->per_cpu_nodestats =
5609 alloc_percpu(struct per_cpu_nodestat);
5610}
5611
5612static __meminit void zone_pcp_init(struct zone *zone)
5613{
5614
5615
5616
5617
5618
5619 zone->pageset = &boot_pageset;
5620
5621 if (populated_zone(zone))
5622 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5623 zone->name, zone->present_pages,
5624 zone_batchsize(zone));
5625}
5626
5627void __meminit init_currently_empty_zone(struct zone *zone,
5628 unsigned long zone_start_pfn,
5629 unsigned long size)
5630{
5631 struct pglist_data *pgdat = zone->zone_pgdat;
5632
5633 pgdat->nr_zones = zone_idx(zone) + 1;
5634
5635 zone->zone_start_pfn = zone_start_pfn;
5636
5637 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5638 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5639 pgdat->node_id,
5640 (unsigned long)zone_idx(zone),
5641 zone_start_pfn, (zone_start_pfn + size));
5642
5643 zone_init_free_lists(zone);
5644 zone->initialized = 1;
5645}
5646
5647#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5648#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
5649
5650
5651
5652
5653int __meminit __early_pfn_to_nid(unsigned long pfn,
5654 struct mminit_pfnnid_cache *state)
5655{
5656 unsigned long start_pfn, end_pfn;
5657 int nid;
5658
5659 if (state->last_start <= pfn && pfn < state->last_end)
5660 return state->last_nid;
5661
5662 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5663 if (nid != -1) {
5664 state->last_start = start_pfn;
5665 state->last_end = end_pfn;
5666 state->last_nid = nid;
5667 }
5668
5669 return nid;
5670}
5671#endif
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
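/**
 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memblock_free_early_nid() manually.
 */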
5682void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
5683{
5684 unsigned long start_pfn, end_pfn;
5685 int i, this_nid;
5686
5687 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5688 start_pfn = min(start_pfn, max_low_pfn);
5689 end_pfn = min(end_pfn, max_low_pfn);
5690
5691 if (start_pfn < end_pfn)
5692 memblock_free_early_nid(PFN_PHYS(start_pfn),
5693 (end_pfn - start_pfn) << PAGE_SHIFT,
5694 this_nid);
5695 }
5696}
5697
5698
5699
5700
5701
5702
5703
5704
5705void __init sparse_memory_present_with_active_regions(int nid)
5706{
5707 unsigned long start_pfn, end_pfn;
5708 int i, this_nid;
5709
5710 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5711 memory_present(this_nid, start_pfn, end_pfn);
5712}
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723
5724
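/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node with no available
 * memory, both the start and end PFN will be 0.
 */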
5725void __meminit get_pfn_range_for_nid(unsigned int nid,
5726 unsigned long *start_pfn, unsigned long *end_pfn)
5727{
5728 unsigned long this_start_pfn, this_end_pfn;
5729 int i;
5730
5731 *start_pfn = -1UL;
5732 *end_pfn = 0;
5733
5734 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5735 *start_pfn = min(*start_pfn, this_start_pfn);
5736 *end_pfn = max(*end_pfn, this_end_pfn);
5737 }
5738
5739 if (*start_pfn == -1UL)
5740 *start_pfn = 0;
5741}
5742
5743
5744
5745
5746
5747
5748static void __init find_usable_zone_for_movable(void)
5749{
5750 int zone_index;
5751 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5752 if (zone_index == ZONE_MOVABLE)
5753 continue;
5754
5755 if (arch_zone_highest_possible_pfn[zone_index] >
5756 arch_zone_lowest_possible_pfn[zone_index])
5757 break;
5758 }
5759
5760 VM_BUG_ON(zone_index == -1);
5761 movable_zone = zone_index;
5762}
5763
5764
5765
5766
5767
5768
5769
5770
5771
5772
5773
5774static void __meminit adjust_zone_range_for_zone_movable(int nid,
5775 unsigned long zone_type,
5776 unsigned long node_start_pfn,
5777 unsigned long node_end_pfn,
5778 unsigned long *zone_start_pfn,
5779 unsigned long *zone_end_pfn)
5780{
5781
5782 if (zone_movable_pfn[nid]) {
5783
5784 if (zone_type == ZONE_MOVABLE) {
5785 *zone_start_pfn = zone_movable_pfn[nid];
5786 *zone_end_pfn = min(node_end_pfn,
5787 arch_zone_highest_possible_pfn[movable_zone]);
5788
5789
5790 } else if (!mirrored_kernelcore &&
5791 *zone_start_pfn < zone_movable_pfn[nid] &&
5792 *zone_end_pfn > zone_movable_pfn[nid]) {
5793 *zone_end_pfn = zone_movable_pfn[nid];
5794
5795
5796 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5797 *zone_start_pfn = *zone_end_pfn;
5798 }
5799}
5800
5801
5802
5803
5804
5805static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5806 unsigned long zone_type,
5807 unsigned long node_start_pfn,
5808 unsigned long node_end_pfn,
5809 unsigned long *zone_start_pfn,
5810 unsigned long *zone_end_pfn,
5811 unsigned long *ignored)
5812{
5813
5814 if (!node_start_pfn && !node_end_pfn)
5815 return 0;
5816
5817
5818 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5819 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5820 adjust_zone_range_for_zone_movable(nid, zone_type,
5821 node_start_pfn, node_end_pfn,
5822 zone_start_pfn, zone_end_pfn);
5823
5824
5825 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5826 return 0;
5827
5828
5829 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5830 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5831
5832
5833 return *zone_end_pfn - *zone_start_pfn;
5834}
5835
5836
5837
5838
5839
5840unsigned long __meminit __absent_pages_in_range(int nid,
5841 unsigned long range_start_pfn,
5842 unsigned long range_end_pfn)
5843{
5844 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5845 unsigned long start_pfn, end_pfn;
5846 int i;
5847
5848 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5849 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5850 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5851 nr_absent -= end_pfn - start_pfn;
5852 }
5853 return nr_absent;
5854}
5855
5856
5857
5858
5859
5860
5861
5862
5863unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5864 unsigned long end_pfn)
5865{
5866 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5867}
5868
5869
5870static unsigned long __meminit zone_absent_pages_in_node(int nid,
5871 unsigned long zone_type,
5872 unsigned long node_start_pfn,
5873 unsigned long node_end_pfn,
5874 unsigned long *ignored)
5875{
5876 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5877 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5878 unsigned long zone_start_pfn, zone_end_pfn;
5879 unsigned long nr_absent;
5880
5881
5882 if (!node_start_pfn && !node_end_pfn)
5883 return 0;
5884
5885 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5886 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5887
5888 adjust_zone_range_for_zone_movable(nid, zone_type,
5889 node_start_pfn, node_end_pfn,
5890 &zone_start_pfn, &zone_end_pfn);
5891 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5892
5893
5894
5895
5896
5897
5898 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5899 unsigned long start_pfn, end_pfn;
5900 struct memblock_region *r;
5901
5902 for_each_memblock(memory, r) {
5903 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5904 zone_start_pfn, zone_end_pfn);
5905 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5906 zone_start_pfn, zone_end_pfn);
5907
5908 if (zone_type == ZONE_MOVABLE &&
5909 memblock_is_mirror(r))
5910 nr_absent += end_pfn - start_pfn;
5911
5912 if (zone_type == ZONE_NORMAL &&
5913 !memblock_is_mirror(r))
5914 nr_absent += end_pfn - start_pfn;
5915 }
5916 }
5917
5918 return nr_absent;
5919}
5920
5921#else
5922static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5923 unsigned long zone_type,
5924 unsigned long node_start_pfn,
5925 unsigned long node_end_pfn,
5926 unsigned long *zone_start_pfn,
5927 unsigned long *zone_end_pfn,
5928 unsigned long *zones_size)
5929{
5930 unsigned int zone;
5931
5932 *zone_start_pfn = node_start_pfn;
5933 for (zone = 0; zone < zone_type; zone++)
5934 *zone_start_pfn += zones_size[zone];
5935
5936 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5937
5938 return zones_size[zone_type];
5939}
5940
5941static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5942 unsigned long zone_type,
5943 unsigned long node_start_pfn,
5944 unsigned long node_end_pfn,
5945 unsigned long *zholes_size)
5946{
5947 if (!zholes_size)
5948 return 0;
5949
5950 return zholes_size[zone_type];
5951}
5952
5953#endif
5954
5955static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5956 unsigned long node_start_pfn,
5957 unsigned long node_end_pfn,
5958 unsigned long *zones_size,
5959 unsigned long *zholes_size)
5960{
5961 unsigned long realtotalpages = 0, totalpages = 0;
5962 enum zone_type i;
5963
5964 for (i = 0; i < MAX_NR_ZONES; i++) {
5965 struct zone *zone = pgdat->node_zones + i;
5966 unsigned long zone_start_pfn, zone_end_pfn;
5967 unsigned long size, real_size;
5968
5969 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5970 node_start_pfn,
5971 node_end_pfn,
5972 &zone_start_pfn,
5973 &zone_end_pfn,
5974 zones_size);
5975 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5976 node_start_pfn, node_end_pfn,
5977 zholes_size);
5978 if (size)
5979 zone->zone_start_pfn = zone_start_pfn;
5980 else
5981 zone->zone_start_pfn = 0;
5982 zone->spanned_pages = size;
5983 zone->present_pages = real_size;
5984
5985 totalpages += size;
5986 realtotalpages += real_size;
5987 }
5988
5989 pgdat->node_spanned_pages = totalpages;
5990 pgdat->node_present_pages = realtotalpages;
5991 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5992 realtotalpages);
5993}
5994
5995#ifndef CONFIG_SPARSEMEM
5996
5997
5998
5999
6000
6001
6002
6003static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6004{
6005 unsigned long usemapsize;
6006
6007 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6008 usemapsize = roundup(zonesize, pageblock_nr_pages);
6009 usemapsize = usemapsize >> pageblock_order;
6010 usemapsize *= NR_PAGEBLOCK_BITS;
6011 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6012
6013 return usemapsize / 8;
6014}
6015
6016static void __init setup_usemap(struct pglist_data *pgdat,
6017 struct zone *zone,
6018 unsigned long zone_start_pfn,
6019 unsigned long zonesize)
6020{
6021 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6022 zone->pageblock_flags = NULL;
6023 if (usemapsize)
6024 zone->pageblock_flags =
6025 memblock_virt_alloc_node_nopanic(usemapsize,
6026 pgdat->node_id);
6027}
6028#else
6029static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6030 unsigned long zone_start_pfn, unsigned long zonesize) {}
6031#endif
6032
6033#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6034
6035
6036void __paginginit set_pageblock_order(void)
6037{
6038 unsigned int order;
6039
6040
6041 if (pageblock_order)
6042 return;
6043
6044 if (HPAGE_SHIFT > PAGE_SHIFT)
6045 order = HUGETLB_PAGE_ORDER;
6046 else
6047 order = MAX_ORDER - 1;
6048
6049
6050
6051
6052
6053
6054 pageblock_order = order;
6055}
6056#else
6057
6058
6059
6060
6061
6062
6063
6064void __paginginit set_pageblock_order(void)
6065{
6066}
6067
6068#endif
6069
6070static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
6071 unsigned long present_pages)
6072{
6073 unsigned long pages = spanned_pages;
6074
6075
6076
6077
6078
6079
6080
6081
6082
6083 if (spanned_pages > present_pages + (present_pages >> 4) &&
6084 IS_ENABLED(CONFIG_SPARSEMEM))
6085 pages = present_pages;
6086
6087 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6088}
6089
6090
6091
6092
6093
6094
6095
6096
6097
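/*
 * Set up the zone data structures for @pgdat: initialise per-node state,
 * per-zone locks and pagesets, the free lists and the boot-time memmap.
 *
 * NOTE: pgdat should get zeroed by the caller.
 */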
6098static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6099{
6100 enum zone_type j;
6101 int nid = pgdat->node_id;
6102
6103 pgdat_resize_init(pgdat);
6104#ifdef CONFIG_NUMA_BALANCING
6105 spin_lock_init(&pgdat->numabalancing_migrate_lock);
6106 pgdat->numabalancing_migrate_nr_pages = 0;
6107 pgdat->numabalancing_migrate_next_window = jiffies;
6108#endif
6109#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6110 spin_lock_init(&pgdat->split_queue_lock);
6111 INIT_LIST_HEAD(&pgdat->split_queue);
6112 pgdat->split_queue_len = 0;
6113#endif
6114 init_waitqueue_head(&pgdat->kswapd_wait);
6115 init_waitqueue_head(&pgdat->pfmemalloc_wait);
6116#ifdef CONFIG_COMPACTION
6117 init_waitqueue_head(&pgdat->kcompactd_wait);
6118#endif
6119 pgdat_page_ext_init(pgdat);
6120 spin_lock_init(&pgdat->lru_lock);
6121 lruvec_init(node_lruvec(pgdat));
6122
6123 pgdat->per_cpu_nodestats = &boot_nodestats;
6124
6125 for (j = 0; j < MAX_NR_ZONES; j++) {
6126 struct zone *zone = pgdat->node_zones + j;
6127 unsigned long size, realsize, freesize, memmap_pages;
6128 unsigned long zone_start_pfn = zone->zone_start_pfn;
6129
6130 size = zone->spanned_pages;
6131 realsize = freesize = zone->present_pages;
6132
6133
6134
6135
6136
6137
6138 memmap_pages = calc_memmap_size(size, realsize);
6139 if (!is_highmem_idx(j)) {
6140 if (freesize >= memmap_pages) {
6141 freesize -= memmap_pages;
6142 if (memmap_pages)
6143 printk(KERN_DEBUG
6144 " %s zone: %lu pages used for memmap\n",
6145 zone_names[j], memmap_pages);
6146 } else
6147 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
6148 zone_names[j], memmap_pages, freesize);
6149 }
6150
6151
6152 if (j == 0 && freesize > dma_reserve) {
6153 freesize -= dma_reserve;
6154 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6155 zone_names[0], dma_reserve);
6156 }
6157
6158 if (!is_highmem_idx(j))
6159 nr_kernel_pages += freesize;
6160
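  /* Charge for highmem memmap if there are enough kernel pages */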
6161 else if (nr_kernel_pages > memmap_pages * 2)
6162 nr_kernel_pages -= memmap_pages;
6163 nr_all_pages += freesize;
6164
6165
6166
6167
6168
6169
6170 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
6171#ifdef CONFIG_NUMA
6172 zone->node = nid;
6173#endif
6174 zone->name = zone_names[j];
6175 zone->zone_pgdat = pgdat;
6176 spin_lock_init(&zone->lock);
6177 zone_seqlock_init(zone);
6178 zone_pcp_init(zone);
6179
6180 if (!size)
6181 continue;
6182
6183 set_pageblock_order();
6184 setup_usemap(pgdat, zone, zone_start_pfn, size);
6185 init_currently_empty_zone(zone, zone_start_pfn, size);
6186 memmap_init(size, nid, j, zone_start_pfn);
6187 }
6188}
6189
6190static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6191{
6192 unsigned long __maybe_unused start = 0;
6193 unsigned long __maybe_unused offset = 0;
6194
6195
6196 if (!pgdat->node_spanned_pages)
6197 return;
6198
6199#ifdef CONFIG_FLAT_NODE_MEM_MAP
6200 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6201 offset = pgdat->node_start_pfn - start;
6202
6203 if (!pgdat->node_mem_map) {
6204 unsigned long size, end;
6205 struct page *map;
6206
6207
6208
6209
6210
6211
6212 end = pgdat_end_pfn(pgdat);
6213 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6214 size = (end - start) * sizeof(struct page);
6215 map = alloc_remap(pgdat->node_id, size);
6216 if (!map)
6217 map = memblock_virt_alloc_node_nopanic(size,
6218 pgdat->node_id);
6219 pgdat->node_mem_map = map + offset;
6220 }
6221#ifndef CONFIG_NEED_MULTIPLE_NODES
6222
6223
6224
6225 if (pgdat == NODE_DATA(0)) {
6226 mem_map = NODE_DATA(0)->node_mem_map;
6227#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
6228 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6229 mem_map -= offset;
6230#endif
6231 }
6232#endif
6233#endif
6234}
6235
6236void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6237 unsigned long node_start_pfn, unsigned long *zholes_size)
6238{
6239 pg_data_t *pgdat = NODE_DATA(nid);
6240 unsigned long start_pfn = 0;
6241 unsigned long end_pfn = 0;
6242
6243
6244 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6245
6246 pgdat->node_id = nid;
6247 pgdat->node_start_pfn = node_start_pfn;
6248 pgdat->per_cpu_nodestats = NULL;
6249#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6250 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6251 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6252 (u64)start_pfn << PAGE_SHIFT,
6253 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6254#else
6255 start_pfn = node_start_pfn;
6256#endif
6257 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6258 zones_size, zholes_size);
6259
6260 alloc_node_mem_map(pgdat);
6261#ifdef CONFIG_FLAT_NODE_MEM_MAP
6262 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6263 nid, (unsigned long)pgdat,
6264 (unsigned long)pgdat->node_mem_map);
6265#endif
6266
6267 reset_deferred_meminit(pgdat);
6268 free_area_init_core(pgdat);
6269}
6270
6271#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6272
6273#if MAX_NUMNODES > 1
6274
6275
6276
6277void __init setup_nr_node_ids(void)
6278{
6279 unsigned int highest;
6280
6281 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
6282 nr_node_ids = highest + 1;
6283}
6284#endif
6285
6286
6287
6288
6289
6290
6291
6292
6293
6294
6295
6296
6297
6298
6299
6300
6301
6302
6303
6304
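/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after the node map is populated and sorted.
 * It calculates the maximum power-of-two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * indicates 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the nodes are
 * shifted by 256MiB, 256MiB.  Note that if only the last node is shifted,
 * 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether the pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Returns the determined alignment in pfn's; 0 if there is no alignment
 * requirement (single node).
 */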
6305unsigned long __init node_map_pfn_alignment(void)
6306{
6307 unsigned long accl_mask = 0, last_end = 0;
6308 unsigned long start, end, mask;
6309 int last_nid = -1;
6310 int i, nid;
6311
6312 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
6313 if (!start || last_nid < 0 || last_nid == nid) {
6314 last_nid = nid;
6315 last_end = end;
6316 continue;
6317 }
6318
6319
6320
6321
6322
6323
6324 mask = ~((1 << __ffs(start)) - 1);
6325 while (mask && last_end <= (start & (mask << 1)))
6326 mask <<= 1;
6327
6328
6329 accl_mask |= mask;
6330 }
6331
6332
6333 return ~accl_mask + 1;
6334}
6335
6336
6337static unsigned long __init find_min_pfn_for_node(int nid)
6338{
6339 unsigned long min_pfn = ULONG_MAX;
6340 unsigned long start_pfn;
6341 int i;
6342
6343 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6344 min_pfn = min(min_pfn, start_pfn);
6345
6346 if (min_pfn == ULONG_MAX) {
6347 pr_warn("Could not find start_pfn for node %d\n", nid);
6348 return 0;
6349 }
6350
6351 return min_pfn;
6352}
6353
6354
6355
6356
6357
6358
6359
6360unsigned long __init find_min_pfn_with_active_regions(void)
6361{
6362 return find_min_pfn_for_node(MAX_NUMNODES);
6363}
6364
6365
6366
6367
6368
6369
6370static unsigned long __init early_calculate_totalpages(void)
6371{
6372 unsigned long totalpages = 0;
6373 unsigned long start_pfn, end_pfn;
6374 int i, nid;
6375
6376 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6377 unsigned long pages = end_pfn - start_pfn;
6378
6379 totalpages += pages;
6380 if (pages)
6381 node_set_state(nid, N_MEMORY);
6382 }
6383 return totalpages;
6384}
6385
6386
6387
6388
6389
6390
6391
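/*
 * Find the PFN the Movable zone begins at in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */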
6392static void __init find_zone_movable_pfns_for_nodes(void)
6393{
6394 int i, nid;
6395 unsigned long usable_startpfn;
6396 unsigned long kernelcore_node, kernelcore_remaining;
6397
6398 nodemask_t saved_node_state = node_states[N_MEMORY];
6399 unsigned long totalpages = early_calculate_totalpages();
6400 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
6401 struct memblock_region *r;
6402
6403
6404 find_usable_zone_for_movable();
6405
6406
6407
6408
6409
6410 if (movable_node_is_enabled()) {
6411 for_each_memblock(memory, r) {
6412 if (!memblock_is_hotpluggable(r))
6413 continue;
6414
6415 nid = r->nid;
6416
6417 usable_startpfn = PFN_DOWN(r->base);
6418 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6419 min(usable_startpfn, zone_movable_pfn[nid]) :
6420 usable_startpfn;
6421 }
6422
6423 goto out2;
6424 }
6425
6426
6427
6428
6429 if (mirrored_kernelcore) {
6430 bool mem_below_4gb_not_mirrored = false;
6431
6432 for_each_memblock(memory, r) {
6433 if (memblock_is_mirror(r))
6434 continue;
6435
6436 nid = r->nid;
6437
6438 usable_startpfn = memblock_region_memory_base_pfn(r);
6439
6440 if (usable_startpfn < 0x100000) {
6441 mem_below_4gb_not_mirrored = true;
6442 continue;
6443 }
6444
6445 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6446 min(usable_startpfn, zone_movable_pfn[nid]) :
6447 usable_startpfn;
6448 }
6449
6450 if (mem_below_4gb_not_mirrored)
6451   pr_warn("This configuration results in unmirrored kernel memory.\n");
6452
6453 goto out2;
6454 }
6455
6456
6457
6458
6459
6460
6461
6462
6463
6464 if (required_movablecore) {
6465 unsigned long corepages;
6466
6467
6468
6469
6470
6471 required_movablecore =
6472 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
6473 required_movablecore = min(totalpages, required_movablecore);
6474 corepages = totalpages - required_movablecore;
6475
6476 required_kernelcore = max(required_kernelcore, corepages);
6477 }
6478
6479
6480
6481
6482
6483 if (!required_kernelcore || required_kernelcore >= totalpages)
6484 goto out;
6485
6486
6487 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6488
6489restart:
6490
6491 kernelcore_node = required_kernelcore / usable_nodes;
6492 for_each_node_state(nid, N_MEMORY) {
6493 unsigned long start_pfn, end_pfn;
6494
6495
6496
6497
6498
6499
6500 if (required_kernelcore < kernelcore_node)
6501 kernelcore_node = required_kernelcore / usable_nodes;
6502
6503
6504
6505
6506
6507
6508 kernelcore_remaining = kernelcore_node;
6509
6510
6511 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6512 unsigned long size_pages;
6513
6514 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
6515 if (start_pfn >= end_pfn)
6516 continue;
6517
6518
6519 if (start_pfn < usable_startpfn) {
6520 unsigned long kernel_pages;
6521 kernel_pages = min(end_pfn, usable_startpfn)
6522 - start_pfn;
6523
6524 kernelcore_remaining -= min(kernel_pages,
6525 kernelcore_remaining);
6526 required_kernelcore -= min(kernel_pages,
6527 required_kernelcore);
6528
6529
6530 if (end_pfn <= usable_startpfn) {
6531
6532
6533
6534
6535
6536
6537
6538 zone_movable_pfn[nid] = end_pfn;
6539 continue;
6540 }
6541 start_pfn = usable_startpfn;
6542 }
6543
6544
6545
6546
6547
6548
6549 size_pages = end_pfn - start_pfn;
6550 if (size_pages > kernelcore_remaining)
6551 size_pages = kernelcore_remaining;
6552 zone_movable_pfn[nid] = start_pfn + size_pages;
6553
6554
6555
6556
6557
6558
6559 required_kernelcore -= min(required_kernelcore,
6560 size_pages);
6561 kernelcore_remaining -= size_pages;
6562 if (!kernelcore_remaining)
6563 break;
6564 }
6565 }
6566
6567
6568
6569
6570
6571
6572
6573 usable_nodes--;
6574 if (usable_nodes && required_kernelcore > usable_nodes)
6575 goto restart;
6576
6577out2:
6578
6579 for (nid = 0; nid < MAX_NUMNODES; nid++)
6580 zone_movable_pfn[nid] =
6581 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
6582
6583out:
6584
6585 node_states[N_MEMORY] = saved_node_state;
6586}
6587
6588
6589static void check_for_memory(pg_data_t *pgdat, int nid)
6590{
6591 enum zone_type zone_type;
6592
6593 if (N_MEMORY == N_NORMAL_MEMORY)
6594 return;
6595
6596 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
6597 struct zone *zone = &pgdat->node_zones[zone_type];
6598 if (populated_zone(zone)) {
6599 node_set_state(nid, N_HIGH_MEMORY);
6600 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6601 zone_type <= ZONE_NORMAL)
6602 node_set_state(nid, N_NORMAL_MEMORY);
6603 break;
6604 }
6605 }
6606}
6607
6608
6609
6610
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
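/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. Each zone starts where
 * the previous one ended, and if the maximum PFNs of two adjacent zones
 * match, the higher zone is assumed to be empty.
 */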
6621void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6622{
6623 unsigned long start_pfn, end_pfn;
6624 int i, nid;
6625
6626
6627 memset(arch_zone_lowest_possible_pfn, 0,
6628 sizeof(arch_zone_lowest_possible_pfn));
6629 memset(arch_zone_highest_possible_pfn, 0,
6630 sizeof(arch_zone_highest_possible_pfn));
6631
6632 start_pfn = find_min_pfn_with_active_regions();
6633
6634 for (i = 0; i < MAX_NR_ZONES; i++) {
6635 if (i == ZONE_MOVABLE)
6636 continue;
6637
6638 end_pfn = max(max_zone_pfn[i], start_pfn);
6639 arch_zone_lowest_possible_pfn[i] = start_pfn;
6640 arch_zone_highest_possible_pfn[i] = end_pfn;
6641
6642 start_pfn = end_pfn;
6643 }
6644
6645
6646 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
6647 find_zone_movable_pfns_for_nodes();
6648
6649
6650 pr_info("Zone ranges:\n");
6651 for (i = 0; i < MAX_NR_ZONES; i++) {
6652 if (i == ZONE_MOVABLE)
6653 continue;
6654 pr_info(" %-8s ", zone_names[i]);
6655 if (arch_zone_lowest_possible_pfn[i] ==
6656 arch_zone_highest_possible_pfn[i])
6657 pr_cont("empty\n");
6658 else
6659 pr_cont("[mem %#018Lx-%#018Lx]\n",
6660 (u64)arch_zone_lowest_possible_pfn[i]
6661 << PAGE_SHIFT,
6662 ((u64)arch_zone_highest_possible_pfn[i]
6663 << PAGE_SHIFT) - 1);
6664 }
6665
6666
6667 pr_info("Movable zone start for each node\n");
6668 for (i = 0; i < MAX_NUMNODES; i++) {
6669 if (zone_movable_pfn[i])
6670 pr_info(" Node %d: %#018Lx\n", i,
6671 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
6672 }
6673
6674
6675 pr_info("Early memory node ranges\n");
6676 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
6677 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6678 (u64)start_pfn << PAGE_SHIFT,
6679 ((u64)end_pfn << PAGE_SHIFT) - 1);
6680
6681
6682 mminit_verify_pageflags_layout();
6683 setup_nr_node_ids();
6684 for_each_online_node(nid) {
6685 pg_data_t *pgdat = NODE_DATA(nid);
6686 free_area_init_node(nid, NULL,
6687 find_min_pfn_for_node(nid), NULL);
6688
6689
6690 if (pgdat->node_present_pages)
6691 node_set_state(nid, N_MEMORY);
6692 check_for_memory(pgdat, nid);
6693 }
6694}
6695
6696static int __init cmdline_parse_core(char *p, unsigned long *core)
6697{
6698 unsigned long long coremem;
6699 if (!p)
6700 return -EINVAL;
6701
6702 coremem = memparse(p, &p);
6703 *core = coremem >> PAGE_SHIFT;
6704
6705
6706 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6707
6708 return 0;
6709}
6710
6711
6712
6713
6714
6715static int __init cmdline_parse_kernelcore(char *p)
6716{
6717
6718 if (parse_option_str(p, "mirror")) {
6719 mirrored_kernelcore = true;
6720 return 0;
6721 }
6722
6723 return cmdline_parse_core(p, &required_kernelcore);
6724}
6725
6726
6727
6728
6729
6730static int __init cmdline_parse_movablecore(char *p)
6731{
6732 return cmdline_parse_core(p, &required_movablecore);
6733}
6734
6735early_param("kernelcore", cmdline_parse_kernelcore);
6736early_param("movablecore", cmdline_parse_movablecore);
6737
6738#endif
6739
6740void adjust_managed_page_count(struct page *page, long count)
6741{
6742 spin_lock(&managed_page_count_lock);
6743 page_zone(page)->managed_pages += count;
6744 totalram_pages += count;
6745#ifdef CONFIG_HIGHMEM
6746 if (PageHighMem(page))
6747 totalhigh_pages += count;
6748#endif
6749 spin_unlock(&managed_page_count_lock);
6750}
6751EXPORT_SYMBOL(adjust_managed_page_count);
6752
6753unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
6754{
6755 void *pos;
6756 unsigned long pages = 0;
6757
6758 start = (void *)PAGE_ALIGN((unsigned long)start);
6759 end = (void *)((unsigned long)end & PAGE_MASK);
6760 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6761 if ((unsigned int)poison <= 0xFF)
6762 memset(pos, poison, PAGE_SIZE);
6763 free_reserved_page(virt_to_page(pos));
6764 }
6765
6766 if (pages && s)
6767 pr_info("Freeing %s memory: %ldK\n",
6768 s, pages << (PAGE_SHIFT - 10));
6769
6770 return pages;
6771}
6772EXPORT_SYMBOL(free_reserved_area);
6773
6774#ifdef CONFIG_HIGHMEM
6775void free_highmem_page(struct page *page)
6776{
6777 __free_reserved_page(page);
6778 totalram_pages++;
6779 page_zone(page)->managed_pages++;
6780 totalhigh_pages++;
6781}
6782#endif
6783
6784
6785void __init mem_init_print_info(const char *str)
6786{
6787 unsigned long physpages, codesize, datasize, rosize, bss_size;
6788 unsigned long init_code_size, init_data_size;
6789
6790 physpages = get_num_physpages();
6791 codesize = _etext - _stext;
6792 datasize = _edata - _sdata;
6793 rosize = __end_rodata - __start_rodata;
6794 bss_size = __bss_stop - __bss_start;
6795 init_data_size = __init_end - __init_begin;
6796 init_code_size = _einittext - _sinittext;
6797
6798
6799
6800
6801
6802
6803
6804
6805#define adj_init_size(start, end, size, pos, adj) \
6806 do { \
6807 if (start <= pos && pos < end && size > adj) \
6808 size -= adj; \
6809 } while (0)
6810
6811 adj_init_size(__init_begin, __init_end, init_data_size,
6812 _sinittext, init_code_size);
6813 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6814 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6815 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6816 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6817
6818#undef adj_init_size
6819
6820 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
6821#ifdef CONFIG_HIGHMEM
6822 ", %luK highmem"
6823#endif
6824 "%s%s)\n",
6825 nr_free_pages() << (PAGE_SHIFT - 10),
6826 physpages << (PAGE_SHIFT - 10),
6827 codesize >> 10, datasize >> 10, rosize >> 10,
6828 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6829 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6830 totalcma_pages << (PAGE_SHIFT - 10),
6831#ifdef CONFIG_HIGHMEM
6832 totalhigh_pages << (PAGE_SHIFT - 10),
6833#endif
6834 str ? ", " : "", str ? str : "");
6835}
6836
6837
6838
6839
6840
6841
6842
6843
6844
6845
6846
6847
6848void __init set_dma_reserve(unsigned long new_dma_reserve)
6849{
6850 dma_reserve = new_dma_reserve;
6851}
6852
6853void __init free_area_init(unsigned long *zones_size)
6854{
6855 free_area_init_node(0, zones_size,
6856 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6857}
6858
6859static int page_alloc_cpu_dead(unsigned int cpu)
6860{
6861
6862 lru_add_drain_cpu(cpu);
6863 drain_pages(cpu);
6864
6865
6866
6867
6868
6869
6870
6871 vm_events_fold_cpu(cpu);
6872
6873
6874
6875
6876
6877
6878
6879
6880 cpu_vm_stats_fold(cpu);
6881 return 0;
6882}
6883
6884void __init page_alloc_init(void)
6885{
6886 int ret;
6887
6888 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
6889 "mm/page_alloc:dead", NULL,
6890 page_alloc_cpu_dead);
6891 WARN_ON(ret < 0);
6892}
6893
6894
6895
6896
6897
6898static void calculate_totalreserve_pages(void)
6899{
6900 struct pglist_data *pgdat;
6901 unsigned long reserve_pages = 0;
6902 enum zone_type i, j;
6903
6904 for_each_online_pgdat(pgdat) {
6905
6906 pgdat->totalreserve_pages = 0;
6907
6908 for (i = 0; i < MAX_NR_ZONES; i++) {
6909 struct zone *zone = pgdat->node_zones + i;
6910 long max = 0;
6911
6912
6913 for (j = i; j < MAX_NR_ZONES; j++) {
6914 if (zone->lowmem_reserve[j] > max)
6915 max = zone->lowmem_reserve[j];
6916 }
6917
6918
6919 max += high_wmark_pages(zone);
6920
6921 if (max > zone->managed_pages)
6922 max = zone->managed_pages;
6923
6924 pgdat->totalreserve_pages += max;
6925
6926 reserve_pages += max;
6927 }
6928 }
6929 totalreserve_pages = reserve_pages;
6930}
6931
6932
6933
6934
6935
6936
6937
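/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */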
6938static void setup_per_zone_lowmem_reserve(void)
6939{
6940 struct pglist_data *pgdat;
6941 enum zone_type j, idx;
6942
6943 for_each_online_pgdat(pgdat) {
6944 for (j = 0; j < MAX_NR_ZONES; j++) {
6945 struct zone *zone = pgdat->node_zones + j;
6946 unsigned long managed_pages = zone->managed_pages;
6947
6948 zone->lowmem_reserve[j] = 0;
6949
6950 idx = j;
6951 while (idx) {
6952 struct zone *lower_zone;
6953
6954 idx--;
6955
6956 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6957 sysctl_lowmem_reserve_ratio[idx] = 1;
6958
6959 lower_zone = pgdat->node_zones + idx;
6960 lower_zone->lowmem_reserve[j] = managed_pages /
6961 sysctl_lowmem_reserve_ratio[idx];
6962 managed_pages += lower_zone->managed_pages;
6963 }
6964 }
6965 }
6966
6967
6968 calculate_totalreserve_pages();
6969}
6970
6971static void __setup_per_zone_wmarks(void)
6972{
6973 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6974 unsigned long lowmem_pages = 0;
6975 struct zone *zone;
6976 unsigned long flags;
6977
6978
6979 for_each_zone(zone) {
6980 if (!is_highmem(zone))
6981 lowmem_pages += zone->managed_pages;
6982 }
6983
6984 for_each_zone(zone) {
6985 u64 tmp;
6986
6987 spin_lock_irqsave(&zone->lock, flags);
6988 tmp = (u64)pages_min * zone->managed_pages;
6989 do_div(tmp, lowmem_pages);
6990 if (is_highmem(zone)) {
6991
6992
6993
6994
6995
6996
6997
6998
6999
7000 unsigned long min_pages;
7001
7002 min_pages = zone->managed_pages / 1024;
7003 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7004 zone->watermark[WMARK_MIN] = min_pages;
7005 } else {
7006
7007
7008
7009
7010 zone->watermark[WMARK_MIN] = tmp;
7011 }
7012
7013
7014
7015
7016
7017
7018 tmp = max_t(u64, tmp >> 2,
7019 mult_frac(zone->managed_pages,
7020 watermark_scale_factor, 10000));
7021
7022 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
7023 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7024
7025 spin_unlock_irqrestore(&zone->lock, flags);
7026 }
7027
7028
7029 calculate_totalreserve_pages();
7030}
7031
7032
7033
7034
7035
7036
7037
7038
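/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */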
7039void setup_per_zone_wmarks(void)
7040{
7041 mutex_lock(&zonelists_mutex);
7042 __setup_per_zone_wmarks();
7043 mutex_unlock(&zonelists_mutex);
7044}
7045
7069
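/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields, for example:
 *
 *	16MB:	512k
 *	64MB:	1024k
 *	256MB:	2048k
 *	1024MB:	4096k
 *	4096MB:	8192k
 *	16384MB:	16384k
 */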
7070int __meminit init_per_zone_wmark_min(void)
7071{
7072 unsigned long lowmem_kbytes;
7073 int new_min_free_kbytes;
7074
7075 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7076 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7077
7078 if (new_min_free_kbytes > user_min_free_kbytes) {
7079 min_free_kbytes = new_min_free_kbytes;
7080 if (min_free_kbytes < 128)
7081 min_free_kbytes = 128;
7082 if (min_free_kbytes > 65536)
7083 min_free_kbytes = 65536;
7084 } else {
7085 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7086 new_min_free_kbytes, user_min_free_kbytes);
7087 }
7088 setup_per_zone_wmarks();
7089 refresh_zone_stat_thresholds();
7090 setup_per_zone_lowmem_reserve();
7091
7092#ifdef CONFIG_NUMA
7093 setup_min_unmapped_ratio();
7094 setup_min_slab_ratio();
7095#endif
7096
7097 return 0;
7098}
7099core_initcall(init_per_zone_wmark_min)
7100
7101
7102
7103
7104
7105
7106int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7107 void __user *buffer, size_t *length, loff_t *ppos)
7108{
7109 int rc;
7110
7111 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7112 if (rc)
7113 return rc;
7114
7115 if (write) {
7116 user_min_free_kbytes = min_free_kbytes;
7117 setup_per_zone_wmarks();
7118 }
7119 return 0;
7120}
7121
7122int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7123 void __user *buffer, size_t *length, loff_t *ppos)
7124{
7125 int rc;
7126
7127 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7128 if (rc)
7129 return rc;
7130
7131 if (write)
7132 setup_per_zone_wmarks();
7133
7134 return 0;
7135}
7136
7137#ifdef CONFIG_NUMA
7138static void setup_min_unmapped_ratio(void)
7139{
7140 pg_data_t *pgdat;
7141 struct zone *zone;
7142
7143 for_each_online_pgdat(pgdat)
7144 pgdat->min_unmapped_pages = 0;
7145
7146 for_each_zone(zone)
7147 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
7148 sysctl_min_unmapped_ratio) / 100;
7149}
7150
7151
7152int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7153 void __user *buffer, size_t *length, loff_t *ppos)
7154{
7155 int rc;
7156
7157 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7158 if (rc)
7159 return rc;
7160
7161 setup_min_unmapped_ratio();
7162
7163 return 0;
7164}
7165
7166static void setup_min_slab_ratio(void)
7167{
7168 pg_data_t *pgdat;
7169 struct zone *zone;
7170
7171 for_each_online_pgdat(pgdat)
7172 pgdat->min_slab_pages = 0;
7173
7174 for_each_zone(zone)
7175 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
7176 sysctl_min_slab_ratio) / 100;
7177}
7178
7179int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7180 void __user *buffer, size_t *length, loff_t *ppos)
7181{
7182 int rc;
7183
7184 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7185 if (rc)
7186 return rc;
7187
7188 setup_min_slab_ratio();
7189
7190 return 0;
7191}
7192#endif
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202
7203int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7204 void __user *buffer, size_t *length, loff_t *ppos)
7205{
7206 proc_dointvec_minmax(table, write, buffer, length, ppos);
7207 setup_per_zone_lowmem_reserve();
7208 return 0;
7209}
7210
7211
7212
7213
7214
7215
7216int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
7217 void __user *buffer, size_t *length, loff_t *ppos)
7218{
7219 struct zone *zone;
7220 int old_percpu_pagelist_fraction;
7221 int ret;
7222
7223 mutex_lock(&pcp_batch_high_lock);
7224 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7225
7226 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7227 if (!write || ret < 0)
7228 goto out;
7229
7230
7231 if (percpu_pagelist_fraction &&
7232 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7233 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7234 ret = -EINVAL;
7235 goto out;
7236 }
7237
7238
7239 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7240 goto out;
7241
7242 for_each_populated_zone(zone) {
7243 unsigned int cpu;
7244
7245 for_each_possible_cpu(cpu)
7246 pageset_set_high_and_batch(zone,
7247 per_cpu_ptr(zone->pageset, cpu));
7248 }
7249out:
7250 mutex_unlock(&pcp_batch_high_lock);
7251 return ret;
7252}
7253
7254#ifdef CONFIG_NUMA
7255int hashdist = HASHDIST_DEFAULT;
7256
7257static int __init set_hashdist(char *str)
7258{
7259 if (!str)
7260 return 0;
7261 hashdist = simple_strtoul(str, &str, 0);
7262 return 1;
7263}
7264__setup("hashdist=", set_hashdist);
7265#endif
7266
7267#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7268
7269
7270
7271
7272static unsigned long __init arch_reserved_kernel_pages(void)
7273{
7274 return 0;
7275}
7276#endif
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
7287#if __BITS_PER_LONG > 32
7288#define ADAPT_SCALE_BASE (64ul << 30)
7289#define ADAPT_SCALE_SHIFT 2
7290#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7291#endif
7292
7293
7294
7295
7296
7297
7298
7299void *__init alloc_large_system_hash(const char *tablename,
7300 unsigned long bucketsize,
7301 unsigned long numentries,
7302 int scale,
7303 int flags,
7304 unsigned int *_hash_shift,
7305 unsigned int *_hash_mask,
7306 unsigned long low_limit,
7307 unsigned long high_limit)
7308{
7309 unsigned long long max = high_limit;
7310 unsigned long log2qty, size;
7311 void *table = NULL;
7312 gfp_t gfp_flags;
7313
7314
7315 if (!numentries) {
7316
7317 numentries = nr_kernel_pages;
7318 numentries -= arch_reserved_kernel_pages();
7319
7320
7321 if (PAGE_SHIFT < 20)
7322 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
7323
7324#if __BITS_PER_LONG > 32
7325 if (!high_limit) {
7326 unsigned long adapt;
7327
7328 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7329 adapt <<= ADAPT_SCALE_SHIFT)
7330 scale++;
7331 }
7332#endif
7333
7334
7335 if (scale > PAGE_SHIFT)
7336 numentries >>= (scale - PAGE_SHIFT);
7337 else
7338 numentries <<= (PAGE_SHIFT - scale);
7339
7340
7341 if (unlikely(flags & HASH_SMALL)) {
7342
7343 WARN_ON(!(flags & HASH_EARLY));
7344 if (!(numentries >> *_hash_shift)) {
7345 numentries = 1UL << *_hash_shift;
7346 BUG_ON(!numentries);
7347 }
7348 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
7349 numentries = PAGE_SIZE / bucketsize;
7350 }
7351 numentries = roundup_pow_of_two(numentries);
7352
7353
7354 if (max == 0) {
7355 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7356 do_div(max, bucketsize);
7357 }
7358 max = min(max, 0x80000000ULL);
7359
7360 if (numentries < low_limit)
7361 numentries = low_limit;
7362 if (numentries > max)
7363 numentries = max;
7364
7365 log2qty = ilog2(numentries);
7366
7367
7368
7369
7370
7371 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
7372 do {
7373 size = bucketsize << log2qty;
7374 if (flags & HASH_EARLY)
7375 table = memblock_virt_alloc_nopanic(size, 0);
7376 else if (hashdist)
7377 table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
7378 else {
7379
7380
7381
7382
7383
7384 if (get_order(size) < MAX_ORDER) {
7385 table = alloc_pages_exact(size, gfp_flags);
7386 kmemleak_alloc(table, size, 1, gfp_flags);
7387 }
7388 }
7389 } while (!table && size > PAGE_SIZE && --log2qty);
7390
7391 if (!table)
7392 panic("Failed to allocate %s hash table\n", tablename);
7393
7394 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7395 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
7396
7397 if (_hash_shift)
7398 *_hash_shift = log2qty;
7399 if (_hash_mask)
7400 *_hash_mask = (1 << log2qty) - 1;
7401
7402 return table;
7403}
7404
7405
7406
7407
7408
7409
7410
7411
7412
7413
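/*
 * This function checks whether the pageblock containing @page includes
 * unmovable pages or not.  If @count is not zero, it is okay to include
 * up to @count apparently-unmovable pages.
 *
 * The PageLRU check without isolation or lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages.  The __PageMovable()
 * check without lock_page() may likewise miss some movable non-lru pages,
 * so this function cannot be exact.
 */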
7414bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7415 bool skip_hwpoisoned_pages)
7416{
7417 unsigned long pfn, iter, found;
7418 int mt;
7419
7420
7421
7422
7423
7424 if (zone_idx(zone) == ZONE_MOVABLE)
7425 return false;
7426 mt = get_pageblock_migratetype(page);
7427 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
7428 return false;
7429
7430 pfn = page_to_pfn(page);
7431 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7432 unsigned long check = pfn + iter;
7433
7434 if (!pfn_valid_within(check))
7435 continue;
7436
7437 page = pfn_to_page(check);
7438
7439
7440
7441
7442
7443
7444 if (PageHuge(page)) {
7445 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7446 continue;
7447 }
7448
7449
7450
7451
7452
7453
7454
7455 if (!page_ref_count(page)) {
7456 if (PageBuddy(page))
7457 iter += (1 << page_order(page)) - 1;
7458 continue;
7459 }
7460
7461
7462
7463
7464
7465 if (skip_hwpoisoned_pages && PageHWPoison(page))
7466 continue;
7467
7468 if (__PageMovable(page))
7469 continue;
7470
7471 if (!PageLRU(page))
7472 found++;
7473
7474
7475
7476
7477
7478
7479
7480
7481
7482
7483
7484
7485
7486 if (found > count)
7487 return true;
7488 }
7489 return false;
7490}
7491
7492bool is_pageblock_removable_nolock(struct page *page)
7493{
7494 struct zone *zone;
7495 unsigned long pfn;
7496
7497
7498
7499
7500
7501
7502
7503
7504 if (!node_online(page_to_nid(page)))
7505 return false;
7506
7507 zone = page_zone(page);
7508 pfn = page_to_pfn(page);
7509 if (!zone_spans_pfn(zone, pfn))
7510 return false;
7511
7512 return !has_unmovable_pages(zone, page, 0, true);
7513}
7514
7515#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
7516
7517static unsigned long pfn_max_align_down(unsigned long pfn)
7518{
7519 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7520 pageblock_nr_pages) - 1);
7521}
7522
7523static unsigned long pfn_max_align_up(unsigned long pfn)
7524{
7525 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7526 pageblock_nr_pages));
7527}
7528
7529
7530static int __alloc_contig_migrate_range(struct compact_control *cc,
7531 unsigned long start, unsigned long end)
7532{
7533
7534 unsigned long nr_reclaimed;
7535 unsigned long pfn = start;
7536 unsigned int tries = 0;
7537 int ret = 0;
7538
7539 migrate_prep();
7540
7541 while (pfn < end || !list_empty(&cc->migratepages)) {
7542 if (fatal_signal_pending(current)) {
7543 ret = -EINTR;
7544 break;
7545 }
7546
7547 if (list_empty(&cc->migratepages)) {
7548 cc->nr_migratepages = 0;
7549 pfn = isolate_migratepages_range(cc, pfn, end);
7550 if (!pfn) {
7551 ret = -EINTR;
7552 break;
7553 }
7554 tries = 0;
7555 } else if (++tries == 5) {
7556 ret = ret < 0 ? ret : -EBUSY;
7557 break;
7558 }
7559
7560 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7561 &cc->migratepages);
7562 cc->nr_migratepages -= nr_reclaimed;
7563
7564 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
7565 NULL, 0, cc->mode, MR_CMA);
7566 }
7567 if (ret < 0) {
7568 putback_movable_pages(&cc->migratepages);
7569 return ret;
7570 }
7571 return 0;
7572}
7573
7594
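/**
 * alloc_contig_range() -- tries to allocate the given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in the range must have the same migratetype and it
 *			must be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock- or MAX_ORDER_NR_PAGES-
 * aligned.  The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */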
7595int alloc_contig_range(unsigned long start, unsigned long end,
7596 unsigned migratetype, gfp_t gfp_mask)
7597{
7598 unsigned long outer_start, outer_end;
7599 unsigned int order;
7600 int ret = 0;
7601
7602 struct compact_control cc = {
7603 .nr_migratepages = 0,
7604 .order = -1,
7605 .zone = page_zone(pfn_to_page(start)),
7606 .mode = MIGRATE_SYNC,
7607 .ignore_skip_hint = true,
7608 .gfp_mask = current_gfp_context(gfp_mask),
7609 };
7610 INIT_LIST_HEAD(&cc.migratepages);
7611
7612
7613
7614
7615
7616
7617
7618
7619
7620
7621
7622
7623
7624
7625
7626
7627
7628
7629
7630
7631
7632
7633
7634
7635
7636 ret = start_isolate_page_range(pfn_max_align_down(start),
7637 pfn_max_align_up(end), migratetype,
7638 false);
7639 if (ret)
7640 return ret;
7641
7642
7643
7644
7645
7646 ret = __alloc_contig_migrate_range(&cc, start, end);
7647 if (ret && ret != -EBUSY)
7648 goto done;
7649
7650
7651
7652
7653
7654
7655
7656
7657
7658
7659
7660
7661
7662
7663
7664
7665
7666
7667 lru_add_drain_all();
7668 drain_all_pages(cc.zone);
7669
7670 order = 0;
7671 outer_start = start;
7672 while (!PageBuddy(pfn_to_page(outer_start))) {
7673 if (++order >= MAX_ORDER) {
7674 outer_start = start;
7675 break;
7676 }
7677 outer_start &= ~0UL << order;
7678 }
7679
7680 if (outer_start != start) {
7681 order = page_order(pfn_to_page(outer_start));
7682
7683
7684
7685
7686
7687
7688
7689 if (outer_start + (1UL << order) <= start)
7690 outer_start = start;
7691 }
7692
7693
7694 if (test_pages_isolated(outer_start, end, false)) {
7695 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
7696 __func__, outer_start, end);
7697 ret = -EBUSY;
7698 goto done;
7699 }
7700
7701
7702 outer_end = isolate_freepages_range(&cc, outer_start, end);
7703 if (!outer_end) {
7704 ret = -EBUSY;
7705 goto done;
7706 }
7707
7708
7709 if (start != outer_start)
7710 free_contig_range(outer_start, start - outer_start);
7711 if (end != outer_end)
7712 free_contig_range(end, outer_end - end);
7713
7714done:
7715 undo_isolate_page_range(pfn_max_align_down(start),
7716 pfn_max_align_up(end), migratetype);
7717 return ret;
7718}
7719
7720void free_contig_range(unsigned long pfn, unsigned nr_pages)
7721{
7722 unsigned int count = 0;
7723
7724 for (; nr_pages--; pfn++) {
7725 struct page *page = pfn_to_page(pfn);
7726
7727 count += page_count(page) != 1;
7728 __free_page(page);
7729 }
7730 WARN(count != 0, "%u pages are still in use!\n", count);
7731}
7732#endif
7733
7734#ifdef CONFIG_MEMORY_HOTPLUG
7735
7736
7737
7738
7739void __meminit zone_pcp_update(struct zone *zone)
7740{
7741 unsigned cpu;
7742 mutex_lock(&pcp_batch_high_lock);
7743 for_each_possible_cpu(cpu)
7744 pageset_set_high_and_batch(zone,
7745 per_cpu_ptr(zone->pageset, cpu));
7746 mutex_unlock(&pcp_batch_high_lock);
7747}
7748#endif
7749
7750void zone_pcp_reset(struct zone *zone)
7751{
7752 unsigned long flags;
7753 int cpu;
7754 struct per_cpu_pageset *pset;
7755
7756
7757 local_irq_save(flags);
7758 if (zone->pageset != &boot_pageset) {
7759 for_each_online_cpu(cpu) {
7760 pset = per_cpu_ptr(zone->pageset, cpu);
7761 drain_zonestat(zone, pset);
7762 }
7763 free_percpu(zone->pageset);
7764 zone->pageset = &boot_pageset;
7765 }
7766 local_irq_restore(flags);
7767}
7768
7769#ifdef CONFIG_MEMORY_HOTREMOVE
7770
7771
7772
7773
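/*
 * All pages in the range must be in a single zone and isolated
 * before calling this.
 */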
7774void
7775__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7776{
7777 struct page *page;
7778 struct zone *zone;
7779 unsigned int order, i;
7780 unsigned long pfn;
7781 unsigned long flags;
7782
7783 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7784 if (pfn_valid(pfn))
7785 break;
7786 if (pfn == end_pfn)
7787 return;
7788 offline_mem_sections(pfn, end_pfn);
7789 zone = page_zone(pfn_to_page(pfn));
7790 spin_lock_irqsave(&zone->lock, flags);
7791 pfn = start_pfn;
7792 while (pfn < end_pfn) {
7793 if (!pfn_valid(pfn)) {
7794 pfn++;
7795 continue;
7796 }
7797 page = pfn_to_page(pfn);
7798
7799
7800
7801
7802 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7803 pfn++;
7804 SetPageReserved(page);
7805 continue;
7806 }
7807
7808 BUG_ON(page_count(page));
7809 BUG_ON(!PageBuddy(page));
7810 order = page_order(page);
7811#ifdef CONFIG_DEBUG_VM
7812 pr_info("remove from free list %lx %d %lx\n",
7813 pfn, 1 << order, end_pfn);
7814#endif
7815 list_del(&page->lru);
7816 rmv_page_order(page);
7817 zone->free_area[order].nr_free--;
7818 for (i = 0; i < (1 << order); i++)
7819 SetPageReserved((page+i));
7820 pfn += (1 << order);
7821 }
7822 spin_unlock_irqrestore(&zone->lock, flags);
7823}
7824#endif
7825
7826bool is_free_buddy_page(struct page *page)
7827{
7828 struct zone *zone = page_zone(page);
7829 unsigned long pfn = page_to_pfn(page);
7830 unsigned long flags;
7831 unsigned int order;
7832
7833 spin_lock_irqsave(&zone->lock, flags);
7834 for (order = 0; order < MAX_ORDER; order++) {
7835 struct page *page_head = page - (pfn & ((1 << order) - 1));
7836
7837 if (PageBuddy(page_head) && page_order(page_head) >= order)
7838 break;
7839 }
7840 spin_unlock_irqrestore(&zone->lock, flags);
7841
7842 return order < MAX_ORDER;
7843}
7844