// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kswapd will use page_alloc.c to allocate "pages".
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
DEFINE_PER_CPU(int, _numa_mem_);
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);

static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

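/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Stored in page->index so that the pageblock lookup can be
 * avoided when freeing from the pcplists; the cached value may go stale if
 * the pageblock's type changes while the page sits on the list.
 */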
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

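/*
 * Ratios that control how much memory in each lower zone is protected
 * from allocations that could have used a higher zone. With a ratio of
 * 256 for ZONE_DMA, for example, 1/256th of ZONE_DMA is kept back from
 * allocations that would have preferred a higher zone. Tunable via the
 * lowmem_reserve_ratio sysctl.
 */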
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __meminitdata;
static unsigned long nr_all_pages __meminitdata;
static unsigned long dma_reserve __meminitdata;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __meminitdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __meminitdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __meminitdata;
static bool mirrored_kernelcore __meminitdata;

int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	(*nr_initialised)++;
	if ((*nr_initialised > pgdat->static_init_pgcnt) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif
}

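/**
 * __get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */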
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

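/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */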
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	page_mapcount_reset(page);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

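/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit zero of page->compound_head. The rest of bits is pointer to head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */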
void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);

	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

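/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */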
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

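/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */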
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long combined_pfn;
	unsigned long uninitialized_var(buddy_pfn);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;

		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
		struct page *higher_page, *higher_buddy;
		combined_pfn = buddy_pfn & pfn;
		higher_page = page + (combined_pfn - pfn);
		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
		if (pfn_valid_within(buddy_pfn) &&
		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif

static inline void prefetch_buddy(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
	struct page *buddy = page + (buddy_pfn - pfn);

	prefetch(buddy);
}

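/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * Pages are first removed from the pcplists in a round-robin fashion over
 * the migratetype lists without holding the zone lock, and only then freed
 * to the buddy allocator under a single acquisition of zone->lock.
 */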
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int prefetch_nr = 0;
	bool isolated_pageblocks;
	struct page *page, *tmp;
	LIST_HEAD(head);

	while (count) {
		struct list_head *list;

		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			page = list_last_entry(list, struct page, lru);

			list_del(&page->lru);
			pcp->count--;

			if (bulkfree_pcp_prepare(page))
				continue;

			list_add_tail(&page->lru, &head);

			if (prefetch_nr++ < pcp->batch)
				prefetch_buddy(page);
		} while (--count && --batch_free && !list_empty(list));
	}

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	list_for_each_entry_safe(page, tmp, &head, lru) {
		int mt = get_pcppage_migratetype(page);

		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);

		if (unlikely(isolated_pageblocks))
			mt = get_pageblock_migratetype(page);

		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
		trace_mm_page_pcpu_drain(page, 0, mt);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif

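/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */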
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			INIT_LIST_HEAD(&page->lru);

			SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order, true))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit __maybe_unused
meminit_pfn_in_nid(unsigned long pfn, int node,
		   struct mminit_pfnnid_cache *state)
{
	int nid;

	nid = __early_pfn_to_nid(pfn, state);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return true;
}
static inline bool __meminit __maybe_unused
meminit_pfn_in_nid(unsigned long pfn, int node,
		   struct mminit_pfnnid_cache *state)
{
	return true;
}
#endif

void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	return __free_pages_boot_core(page, order);
}

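/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration of free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zones range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */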
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			 block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
	}

	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

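/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * First we check if pfn is valid on architectures where it is possible to have
 * holes within pageblocks. Next, we check if a current large page is valid by
 * only checking the validity of the head pfn.
 *
 * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
 * within a node: a pfn is between start and end of a node, but does not belong
 * to this memory node.
 */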
static inline bool __init
deferred_pfn_valid(int nid, unsigned long pfn,
		   struct mminit_pfnnid_cache *nid_init_state)
{
	if (!pfn_valid_within(pfn))
		return false;
	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
		return false;
	if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
		return false;
	return true;
}

/*
 * Free pages to buddy allocator. Try to free aligned pages in
 * pageblock_nr_pages sizes.
 */
static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
				       unsigned long end_pfn)
{
	struct mminit_pfnnid_cache nid_init_state = { };
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (!(pfn & nr_pgmask)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 1;
			touch_nmi_watchdog();
		} else {
			nr_free++;
		}
	}
	/* Free the last block of pages to allocator */
	deferred_free_range(pfn - nr_free, nr_free);
}

/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing them only once per pageblock.
 */
static unsigned long __init deferred_init_pages(int nid, int zid,
						unsigned long pfn,
						unsigned long end_pfn)
{
	struct mminit_pfnnid_cache nid_init_state = { };
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	unsigned long nr_pages = 0;
	struct page *page = NULL;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
			page = NULL;
			continue;
		} else if (!page || !(pfn & nr_pgmask)) {
			page = pfn_to_page(pfn);
			touch_nmi_watchdog();
		} else {
			page++;
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
	return (nr_pages);
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	int nid = pgdat->node_id;
	unsigned long start = jiffies;
	unsigned long nr_pages = 0;
	unsigned long spfn, epfn, first_init_pfn, flags;
	phys_addr_t spa, epa;
	int zid;
	struct zone *zone;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	u64 i;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}
	first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);

	/*
	 * Initialize and free pages. We do it in two loops: first we
	 * initialize struct pages, and then we free them to the buddy
	 * allocator.
	 */
	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
		nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
	}
	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
		deferred_free_pages(nid, zid, spfn, epfn);
	}
	pgdat_resize_unlock(pgdat, &flags);

	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
					jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}

/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 */
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
	int zid = zone_idx(zone);
	int nid = zone_to_nid(zone);
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
	unsigned long nr_pages = 0;
	unsigned long first_init_pfn, spfn, epfn, t, flags;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	phys_addr_t spa, epa;
	u64 i;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If deferred pages have been initialized while we were waiting for
	 * the lock, return true, as the zone was grown. The caller will retry
	 * this zone. We won't return to this function since the caller also
	 * has this static branch.
	 */
	if (!static_branch_unlikely(&deferred_pages)) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/*
	 * If someone grew this zone while we were waiting for spinlock, return
	 * true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn);

	if (first_init_pfn >= pgdat_end_pfn(pgdat)) {
		pgdat_resize_unlock(pgdat, &flags);
		return false;
	}

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));

		while (spfn < epfn && nr_pages < nr_pages_needed) {
			t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
			first_deferred_pfn = min(t, epfn);
			nr_pages += deferred_init_pages(nid, zid, spfn,
							first_deferred_pfn);
			spfn = first_deferred_pfn;
		}

		if (nr_pages >= nr_pages_needed)
			break;
	}

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
		epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
		deferred_free_pages(nid, zid, spfn, epfn);

		if (first_deferred_pfn == epfn)
			break;
	}
	pgdat->first_deferred_pfn = first_deferred_pfn;
	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have refdata wrapper to avoid warning,
 * and to ensure that the function body gets unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
	struct zone *zone;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	int nid;

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * We initialized the rest of the deferred pages.  Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Recalculate the fs.file-max limit now that all memory is online */
	files_maxfiles_init();
#endif
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
	/* Discard memblock private memory */
	memblock_discard();
#endif

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

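/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */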
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), that will allow to
		 * merge back to allocator when buddy will be freed.
		 * Corresponding page table entries will not be touched,
		 * pages will stay not present in virtual address space
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

static void check_new_page_bad(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & __PG_HWPOISON)) {
		bad_reason = "HWPoisoned (hardware-corrupted)";
		bad_flags = __PG_HWPOISON;
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page);	/* remove PageBuddy */
		return;
	}
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool free_pages_prezeroed(void)
{
	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
		page_poisoning_enabled();
}

#ifdef CONFIG_DEBUG_VM
static bool check_pcp_refill(struct page *page)
{
	return false;
}

static bool check_new_pcp(struct page *page)
{
	return check_new_page(page);
}
#else
static bool check_pcp_refill(struct page *page)
{
	return check_new_page(page);
}
static bool check_new_pcp(struct page *page)
{
	return false;
}
#endif

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;
	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);
	set_page_owner(page, order, gfp_flags);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	int i;

	post_alloc_hook(page, order, gfp_flags);

	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = list_first_entry_or_null(&area->free_list[migratetype],
							struct page, lru);
		if (!page)
			continue;
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}

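/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */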
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES },
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES },
#endif
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned int order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
	          pfn_valid(page_to_pfn(end_page)) &&
	          page_zone(start_page) != page_zone(end_page));
#endif

	if (num_movable)
		*num_movable = 0;

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as pageblock scanning is not cheap.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;

			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype,
								num_movable);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

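/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */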
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * Leaving this order check is intended, although there is
	 * relaxed order check in next check. The reason is that
	 * we can actually steal whole pageblock if this condition met,
	 * but, below check doesn't guarantee it and that is just heuristic
	 * so could be changed anytime.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
 * pageblock to our migratetype and determine how many already-allocated pages
 * are there in the pageblock with a compatible migratetype. If at least half
 * of pages are free or compatible, we can change migratetype of the pageblock
 * itself, so pages freed in the future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
					int start_type, bool whole_block)
{
	unsigned int current_order = page_order(page);
	struct free_area *area;
	int free_pages, movable_pages, alike_pages;
	int old_block_type;

	old_block_type = get_pageblock_migratetype(page);

	/*
	 * This can happen due to races and we want to prevent broken
	 * highatomic accounting.
	 */
	if (is_migrate_highatomic(old_block_type))
		goto single_page;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		goto single_page;
	}

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;

	free_pages = move_freepages_block(zone, page, start_type,
						&movable_pages);
	/*
	 * Determine how many pages are compatible with our allocation.
	 * For movable allocation, it's the number of movable pages which
	 * we just obtained. For other types it's a bit more tricky.
	 */
	if (start_type == MIGRATE_MOVABLE) {
		alike_pages = movable_pages;
	} else {
		/*
		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
		 * to MOVABLE pageblock, consider all non-movable pages as
		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
		 * vice versa, be conservative since we can't distinguish the
		 * exact migratetype of non-movable pages.
		 */
		if (old_block_type == MIGRATE_MOVABLE)
			alike_pages = pageblock_nr_pages
						- (free_pages + movable_pages);
		else
			alike_pages = 0;
	}

	/* moving whole block can fail due to per-cpu pages */
	if (!free_pages)
		goto single_page;

	/*
	 * If a sufficient number of pages in the block are either free or of
	 * comparable migratability as our allocation, claim the whole block.
	 */
	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);

	return;

single_page:
	area = &zone->free_area[current_order];
	list_move(&page->lru, &area->free_list[start_type]);
}

/*
 * Check whether there is a suitable fallback freepage with requested order.
 * If only_stealable is true, this function returns fallback_mt only if
 * we can steal other freepages all together. This would help to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_TYPES)
			break;

		if (list_empty(&area->free_list[fallback_mt]))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		if (!only_stealable)
			return fallback_mt;

		if (*can_steal)
			return fallback_mt;
	}

	return -1;
}

/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
				unsigned int alloc_order)
{
	int mt;
	unsigned long max_managed, flags;

	/*
	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
	 * Check is race-prone but harmless.
	 */
	max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	/* Yoink! */
	mt = get_pageblock_migratetype(page);
	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
	    && !is_migrate_cma(mt)) {
		zone->nr_reserved_highatomic += pageblock_nr_pages;
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve a pageblock even though highatomic
 * pageblock is exhausted.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
						bool force)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;
	bool ret;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
								ac->nodemask) {
		/*
		 * Preserve at least one pageblock unless memory pressure
		 * is really high.
		 */
		if (!force && zone->nr_reserved_highatomic <=
					pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &(zone->free_area[order]);

			page = list_first_entry_or_null(
					&area->free_list[MIGRATE_HIGHATOMIC],
					struct page, lru);
			if (!page)
				continue;

			/*
			 * In page freeing path, migratetype change is racy so
			 * we can counter several free pages in a pageblock
			 * in this loop although we changed the pageblock type
			 * from highatomic to ac->migratetype. So we should
			 * adjust the count once.
			 */
			if (is_migrate_highatomic_page(page)) {
				/*
				 * It should never happen but changes to
				 * locking could inadvertently allow a per-cpu
				 * drain to add pages to MIGRATE_HIGHATOMIC
				 * while unreserving so be safe and watch for
				 * underflows.
				 */
				zone->nr_reserved_highatomic -= min(
						pageblock_nr_pages,
						zone->nr_reserved_highatomic);
			}

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			set_pageblock_migratetype(page, ac->migratetype);
			ret = move_freepages_block(zone, page, ac->migratetype,
									NULL);
			if (ret) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return ret;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return false;
}

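/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns true if
 * fallback was found so that __rmqueue_smallest() can grab it.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */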
2366static __always_inline bool
2367__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
2368{
2369 struct free_area *area;
2370 int current_order;
2371 struct page *page;
2372 int fallback_mt;
2373 bool can_steal;
2374
2375
2376
2377
2378
2379
2380 for (current_order = MAX_ORDER - 1; current_order >= order;
2381 --current_order) {
2382 area = &(zone->free_area[current_order]);
2383 fallback_mt = find_suitable_fallback(area, current_order,
2384 start_migratetype, false, &can_steal);
2385 if (fallback_mt == -1)
2386 continue;
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2397 && current_order > order)
2398 goto find_smallest;
2399
2400 goto do_steal;
2401 }
2402
2403 return false;
2404
2405find_smallest:
2406 for (current_order = order; current_order < MAX_ORDER;
2407 current_order++) {
2408 area = &(zone->free_area[current_order]);
2409 fallback_mt = find_suitable_fallback(area, current_order,
2410 start_migratetype, false, &can_steal);
2411 if (fallback_mt != -1)
2412 break;
2413 }
2414
2415
2416
2417
2418
2419 VM_BUG_ON(current_order == MAX_ORDER);
2420
2421do_steal:
2422 page = list_first_entry(&area->free_list[fallback_mt],
2423 struct page, lru);
2424
2425 steal_suitable_fallback(zone, page, start_migratetype, can_steal);
2426
2427 trace_mm_page_alloc_extfrag(page, order, current_order,
2428 start_migratetype, fallback_mt);
2429
2430 return true;
2431
2432}
2433
2434
2435
2436
2437
2438static __always_inline struct page *
2439__rmqueue(struct zone *zone, unsigned int order, int migratetype)
2440{
2441 struct page *page;
2442
2443retry:
2444 page = __rmqueue_smallest(zone, order, migratetype);
2445 if (unlikely(!page)) {
2446 if (migratetype == MIGRATE_MOVABLE)
2447 page = __rmqueue_cma_fallback(zone, order);
2448
2449 if (!page && __rmqueue_fallback(zone, order, migratetype))
2450 goto retry;
2451 }
2452
2453 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2454 return page;
2455}
2456
2457
2458
2459
2460
2461
2462static int rmqueue_bulk(struct zone *zone, unsigned int order,
2463 unsigned long count, struct list_head *list,
2464 int migratetype)
2465{
2466 int i, alloced = 0;
2467
2468 spin_lock(&zone->lock);
2469 for (i = 0; i < count; ++i) {
2470 struct page *page = __rmqueue(zone, order, migratetype);
2471 if (unlikely(page == NULL))
2472 break;
2473
2474 if (unlikely(check_pcp_refill(page)))
2475 continue;
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487 list_add_tail(&page->lru, list);
2488 alloced++;
2489 if (is_migrate_cma(get_pcppage_migratetype(page)))
2490 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2491 -(1 << order));
2492 }
2493
2494
2495
2496
2497
2498
2499
2500 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2501 spin_unlock(&zone->lock);
2502 return alloced;
2503}
2504
2505#ifdef CONFIG_NUMA
2506
2507
2508
2509
2510
2511
2512
2513
2514void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2515{
2516 unsigned long flags;
2517 int to_drain, batch;
2518
2519 local_irq_save(flags);
2520 batch = READ_ONCE(pcp->batch);
2521 to_drain = min(pcp->count, batch);
2522 if (to_drain > 0)
2523 free_pcppages_bulk(zone, to_drain, pcp);
2524 local_irq_restore(flags);
2525}
2526#endif
2527
2528
2529
2530
2531
2532
2533
2534
2535static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2536{
2537 unsigned long flags;
2538 struct per_cpu_pageset *pset;
2539 struct per_cpu_pages *pcp;
2540
2541 local_irq_save(flags);
2542 pset = per_cpu_ptr(zone->pageset, cpu);
2543
2544 pcp = &pset->pcp;
2545 if (pcp->count)
2546 free_pcppages_bulk(zone, pcp->count, pcp);
2547 local_irq_restore(flags);
2548}
2549
2550
2551
2552
2553
2554
2555
2556
2557static void drain_pages(unsigned int cpu)
2558{
2559 struct zone *zone;
2560
2561 for_each_populated_zone(zone) {
2562 drain_pages_zone(cpu, zone);
2563 }
2564}
2565
2566
2567
2568
2569
2570
2571
2572void drain_local_pages(struct zone *zone)
2573{
2574 int cpu = smp_processor_id();
2575
2576 if (zone)
2577 drain_pages_zone(cpu, zone);
2578 else
2579 drain_pages(cpu);
2580}
2581
2582static void drain_local_pages_wq(struct work_struct *work)
2583{
2584
2585
2586
2587
2588
2589
2590
2591 preempt_disable();
2592 drain_local_pages(NULL);
2593 preempt_enable();
2594}
2595
/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 *
 * Note that this can be extremely slow as the draining happens in a workqueue.
 */
2603void drain_all_pages(struct zone *zone)
2604{
2605 int cpu;
2606
	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
2611 static cpumask_t cpus_with_pcps;
2612
	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
2617 if (WARN_ON_ONCE(!mm_percpu_wq))
2618 return;
2619
	/*
	 * Do not drain if one is already in progress unless it's specific to
	 * a particular zone. Otherwise wait for the in-flight drain to finish
	 * before starting this zone-specific one.
	 */
2625 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2626 if (!zone)
2627 return;
2628 mutex_lock(&pcpu_drain_mutex);
2629 }
2630
	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpus to drain their per-cpu pagesets.
	 */
2637 for_each_online_cpu(cpu) {
2638 struct per_cpu_pageset *pcp;
2639 struct zone *z;
2640 bool has_pcps = false;
2641
2642 if (zone) {
2643 pcp = per_cpu_ptr(zone->pageset, cpu);
2644 if (pcp->pcp.count)
2645 has_pcps = true;
2646 } else {
2647 for_each_populated_zone(z) {
2648 pcp = per_cpu_ptr(z->pageset, cpu);
2649 if (pcp->pcp.count) {
2650 has_pcps = true;
2651 break;
2652 }
2653 }
2654 }
2655
2656 if (has_pcps)
2657 cpumask_set_cpu(cpu, &cpus_with_pcps);
2658 else
2659 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2660 }
2661
2662 for_each_cpu(cpu, &cpus_with_pcps) {
2663 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2664 INIT_WORK(work, drain_local_pages_wq);
2665 queue_work_on(cpu, mm_percpu_wq, work);
2666 }
2667 for_each_cpu(cpu, &cpus_with_pcps)
2668 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2669
2670 mutex_unlock(&pcpu_drain_mutex);
2671}
2672
2673#ifdef CONFIG_HIBERNATION
2674
/*
 * Touch the NMI watchdog for every WD_PAGE_COUNT pages.
 */
2678#define WD_PAGE_COUNT (128*1024)
2679
2680void mark_free_pages(struct zone *zone)
2681{
2682 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2683 unsigned long flags;
2684 unsigned int order, t;
2685 struct page *page;
2686
2687 if (zone_is_empty(zone))
2688 return;
2689
2690 spin_lock_irqsave(&zone->lock, flags);
2691
2692 max_zone_pfn = zone_end_pfn(zone);
2693 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2694 if (pfn_valid(pfn)) {
2695 page = pfn_to_page(pfn);
2696
2697 if (!--page_count) {
2698 touch_nmi_watchdog();
2699 page_count = WD_PAGE_COUNT;
2700 }
2701
2702 if (page_zone(page) != zone)
2703 continue;
2704
2705 if (!swsusp_page_is_forbidden(page))
2706 swsusp_unset_page_free(page);
2707 }
2708
2709 for_each_migratetype_order(order, t) {
2710 list_for_each_entry(page,
2711 &zone->free_area[order].free_list[t], lru) {
2712 unsigned long i;
2713
2714 pfn = page_to_pfn(page);
2715 for (i = 0; i < (1UL << order); i++) {
2716 if (!--page_count) {
2717 touch_nmi_watchdog();
2718 page_count = WD_PAGE_COUNT;
2719 }
2720 swsusp_set_page_free(pfn_to_page(pfn + i));
2721 }
2722 }
2723 }
2724 spin_unlock_irqrestore(&zone->lock, flags);
2725}
2726#endif
2727
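/*
 * Prepare a 0-order page for freeing to the per-cpu lists: run the usual
 * free-time checks and cache the pageblock migratetype in the page so the
 * commit step below does not have to look it up again.
 */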
2728static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
2729{
2730 int migratetype;
2731
2732 if (!free_pcp_prepare(page))
2733 return false;
2734
2735 migratetype = get_pfnblock_migratetype(page, pfn);
2736 set_pcppage_migratetype(page, migratetype);
2737 return true;
2738}
2739
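/*
 * Queue a prepared 0-order page on the current CPU's pcp list.  Isolated
 * pageblocks bypass the pcp lists and go straight back to the buddy
 * allocator; the list is trimmed by one batch once it grows past pcp->high.
 * The caller must have interrupts disabled.
 */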
2740static void free_unref_page_commit(struct page *page, unsigned long pfn)
2741{
2742 struct zone *zone = page_zone(page);
2743 struct per_cpu_pages *pcp;
2744 int migratetype;
2745
2746 migratetype = get_pcppage_migratetype(page);
2747 __count_vm_event(PGFREE);
2748
	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat HIGHATOMIC as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
2756 if (migratetype >= MIGRATE_PCPTYPES) {
2757 if (unlikely(is_migrate_isolate(migratetype))) {
2758 free_one_page(zone, page, pfn, 0, migratetype);
2759 return;
2760 }
2761 migratetype = MIGRATE_MOVABLE;
2762 }
2763
2764 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2765 list_add(&page->lru, &pcp->lists[migratetype]);
2766 pcp->count++;
2767 if (pcp->count >= pcp->high) {
2768 unsigned long batch = READ_ONCE(pcp->batch);
2769 free_pcppages_bulk(zone, batch, pcp);
2770 }
2771}
2772
2773
/*
 * Free a 0-order page
 */
2776void free_unref_page(struct page *page)
2777{
2778 unsigned long flags;
2779 unsigned long pfn = page_to_pfn(page);
2780
2781 if (!free_unref_page_prepare(page, pfn))
2782 return;
2783
2784 local_irq_save(flags);
2785 free_unref_page_commit(page, pfn);
2786 local_irq_restore(flags);
2787}
2788
2789
/*
 * Free a list of 0-order pages
 */
2792void free_unref_page_list(struct list_head *list)
2793{
2794 struct page *page, *next;
2795 unsigned long flags, pfn;
2796 int batch_count = 0;
2797
	/* Prepare pages for freeing */
2799 list_for_each_entry_safe(page, next, list, lru) {
2800 pfn = page_to_pfn(page);
2801 if (!free_unref_page_prepare(page, pfn))
2802 list_del(&page->lru);
2803 set_page_private(page, pfn);
2804 }
2805
2806 local_irq_save(flags);
2807 list_for_each_entry_safe(page, next, list, lru) {
2808 unsigned long pfn = page_private(page);
2809
2810 set_page_private(page, 0);
2811 trace_mm_page_free_batched(page);
2812 free_unref_page_commit(page, pfn);
2813
		/*
		 * Guard against excessive IRQ disabled times when we get
		 * a large list of pages to free.
		 */
2818 if (++batch_count == SWAP_CLUSTER_MAX) {
2819 local_irq_restore(flags);
2820 batch_count = 0;
2821 local_irq_save(flags);
2822 }
2823 }
2824 local_irq_restore(flags);
2825}
2826
2827
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
2835void split_page(struct page *page, unsigned int order)
2836{
2837 int i;
2838
2839 VM_BUG_ON_PAGE(PageCompound(page), page);
2840 VM_BUG_ON_PAGE(!page_count(page), page);
2841
2842 for (i = 1; i < (1 << order); i++)
2843 set_page_refcounted(page + i);
2844 split_page_owner(page, order);
2845}
2846EXPORT_SYMBOL_GPL(split_page);
2847
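/*
 * Remove a free page from the buddy allocator for exclusive use by the
 * caller (e.g. compaction).  Returns the number of isolated base pages
 * (1UL << order), or 0 if taking the page would push the zone below its
 * minimum watermark.  Expects zone->lock to be held by the caller.
 */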
2848int __isolate_free_page(struct page *page, unsigned int order)
2849{
2850 unsigned long watermark;
2851 struct zone *zone;
2852 int mt;
2853
2854 BUG_ON(!PageBuddy(page));
2855
2856 zone = page_zone(page);
2857 mt = get_pageblock_migratetype(page);
2858
2859 if (!is_migrate_isolate(mt)) {
		/*
		 * Obey watermarks as if the page was being allocated. We can
		 * emulate a high-order watermark check with a raised order-0
		 * watermark, because we already know our high-order page
		 * exists.
		 */
2866 watermark = min_wmark_pages(zone) + (1UL << order);
2867 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2868 return 0;
2869
2870 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2871 }
2872
2873
2874 list_del(&page->lru);
2875 zone->free_area[order].nr_free--;
2876 rmv_page_order(page);
2877
2878
	/*
	 * If the isolated page spans at least half a pageblock, convert the
	 * affected pageblocks to MIGRATE_MOVABLE so the space can be reused
	 * for movable allocations later.
	 */
2882 if (order >= pageblock_order - 1) {
2883 struct page *endpage = page + (1 << order) - 1;
2884 for (; page < endpage; page += pageblock_nr_pages) {
2885 int mt = get_pageblock_migratetype(page);
2886 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2887 && !is_migrate_highatomic(mt))
2888 set_pageblock_migratetype(page,
2889 MIGRATE_MOVABLE);
2890 }
2891 }
2892
2893
2894 return 1UL << order;
2895}
2896
2897
/*
 * Update NUMA hit/miss statistics
 *
 * Must be called with interrupts disabled.
 */
2902static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
2903{
2904#ifdef CONFIG_NUMA
2905 enum numa_stat_item local_stat = NUMA_LOCAL;
2906
2907
2908 if (!static_branch_likely(&vm_numa_stat_key))
2909 return;
2910
2911 if (zone_to_nid(z) != numa_node_id())
2912 local_stat = NUMA_OTHER;
2913
2914 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
2915 __inc_numa_state(z, NUMA_HIT);
2916 else {
2917 __inc_numa_state(z, NUMA_MISS);
2918 __inc_numa_state(preferred_zone, NUMA_FOREIGN);
2919 }
2920 __inc_numa_state(z, local_stat);
2921#endif
2922}
2923
2924
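/* Remove page from the per-cpu list, caller must protect the list */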
2925static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2926 struct per_cpu_pages *pcp,
2927 struct list_head *list)
2928{
2929 struct page *page;
2930
2931 do {
2932 if (list_empty(list)) {
2933 pcp->count += rmqueue_bulk(zone, 0,
2934 pcp->batch, list,
2935 migratetype);
2936 if (unlikely(list_empty(list)))
2937 return NULL;
2938 }
2939
2940 page = list_first_entry(list, struct page, lru);
2941 list_del(&page->lru);
2942 pcp->count--;
2943 } while (check_new_pcp(page));
2944
2945 return page;
2946}
2947
2948
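/* Lock and remove page from the per-cpu list */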
2949static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2950 struct zone *zone, unsigned int order,
2951 gfp_t gfp_flags, int migratetype)
2952{
2953 struct per_cpu_pages *pcp;
2954 struct list_head *list;
2955 struct page *page;
2956 unsigned long flags;
2957
2958 local_irq_save(flags);
2959 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2960 list = &pcp->lists[migratetype];
2961 page = __rmqueue_pcplist(zone, migratetype, pcp, list);
2962 if (page) {
2963 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2964 zone_statistics(preferred_zone, zone);
2965 }
2966 local_irq_restore(flags);
2967 return page;
2968}
2969
/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
2973static inline
2974struct page *rmqueue(struct zone *preferred_zone,
2975 struct zone *zone, unsigned int order,
2976 gfp_t gfp_flags, unsigned int alloc_flags,
2977 int migratetype)
2978{
2979 unsigned long flags;
2980 struct page *page;
2981
2982 if (likely(order == 0)) {
2983 page = rmqueue_pcplist(preferred_zone, zone, order,
2984 gfp_flags, migratetype);
2985 goto out;
2986 }
2987
	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
2992 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2993 spin_lock_irqsave(&zone->lock, flags);
2994
2995 do {
2996 page = NULL;
2997 if (alloc_flags & ALLOC_HARDER) {
2998 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2999 if (page)
3000 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3001 }
3002 if (!page)
3003 page = __rmqueue(zone, order, migratetype);
3004 } while (page && check_new_pages(page, order));
3005 spin_unlock(&zone->lock);
3006 if (!page)
3007 goto failed;
3008 __mod_zone_freepage_state(zone, -(1 << order),
3009 get_pcppage_migratetype(page));
3010
3011 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3012 zone_statistics(preferred_zone, zone);
3013 local_irq_restore(flags);
3014
3015out:
3016 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3017 return page;
3018
3019failed:
3020 local_irq_restore(flags);
3021 return NULL;
3022}
3023
3024#ifdef CONFIG_FAIL_PAGE_ALLOC
3025
3026static struct {
3027 struct fault_attr attr;
3028
3029 bool ignore_gfp_highmem;
3030 bool ignore_gfp_reclaim;
3031 u32 min_order;
3032} fail_page_alloc = {
3033 .attr = FAULT_ATTR_INITIALIZER,
3034 .ignore_gfp_reclaim = true,
3035 .ignore_gfp_highmem = true,
3036 .min_order = 1,
3037};
3038
3039static int __init setup_fail_page_alloc(char *str)
3040{
3041 return setup_fault_attr(&fail_page_alloc.attr, str);
3042}
3043__setup("fail_page_alloc=", setup_fail_page_alloc);
3044
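/*
 * Decide whether fault injection should fail this allocation, honouring the
 * min_order, __GFP_NOFAIL, highmem and direct-reclaim filters configured
 * above.
 */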
3045static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3046{
3047 if (order < fail_page_alloc.min_order)
3048 return false;
3049 if (gfp_mask & __GFP_NOFAIL)
3050 return false;
3051 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3052 return false;
3053 if (fail_page_alloc.ignore_gfp_reclaim &&
3054 (gfp_mask & __GFP_DIRECT_RECLAIM))
3055 return false;
3056
3057 return should_fail(&fail_page_alloc.attr, 1 << order);
3058}
3059
3060#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3061
3062static int __init fail_page_alloc_debugfs(void)
3063{
3064 umode_t mode = S_IFREG | 0600;
3065 struct dentry *dir;
3066
3067 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3068 &fail_page_alloc.attr);
3069 if (IS_ERR(dir))
3070 return PTR_ERR(dir);
3071
3072 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
3073 &fail_page_alloc.ignore_gfp_reclaim))
3074 goto fail;
3075 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3076 &fail_page_alloc.ignore_gfp_highmem))
3077 goto fail;
3078 if (!debugfs_create_u32("min-order", mode, dir,
3079 &fail_page_alloc.min_order))
3080 goto fail;
3081
3082 return 0;
3083fail:
3084 debugfs_remove_recursive(dir);
3085
3086 return -ENOMEM;
3087}
3088
3089late_initcall(fail_page_alloc_debugfs);
3090
3091#endif
3092
3093#else
3094
3095static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3096{
3097 return false;
3098}
3099
3100#endif
3101
/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
3108bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3109 int classzone_idx, unsigned int alloc_flags,
3110 long free_pages)
3111{
3112 long min = mark;
3113 int o;
3114 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3115
3116
3117 free_pages -= (1 << order) - 1;
3118
3119 if (alloc_flags & ALLOC_HIGH)
3120 min -= min / 2;
3121
	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
	 */
3127 if (likely(!alloc_harder)) {
3128 free_pages -= z->nr_reserved_highatomic;
3129 } else {
		/*
		 * OOM victims can try even harder than normal ALLOC_HARDER
		 * users on the grounds that it's definitely going to be in
		 * the exit path shortly and free memory. Any allocation it
		 * makes during the free path will be small and short-lived.
		 */
3136 if (alloc_flags & ALLOC_OOM)
3137 min -= min / 2;
3138 else
3139 min -= min / 4;
3140 }
3141
3142
3143#ifdef CONFIG_CMA
3144
3145 if (!(alloc_flags & ALLOC_CMA))
3146 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
3147#endif
3148
	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
	 * even if a suitable page happened to be free.
	 */
3154 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
3155 return false;
3156
3157
3158 if (!order)
3159 return true;
3160
3161
3162 for (o = order; o < MAX_ORDER; o++) {
3163 struct free_area *area = &z->free_area[o];
3164 int mt;
3165
3166 if (!area->nr_free)
3167 continue;
3168
3169 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3170 if (!list_empty(&area->free_list[mt]))
3171 return true;
3172 }
3173
3174#ifdef CONFIG_CMA
3175 if ((alloc_flags & ALLOC_CMA) &&
3176 !list_empty(&area->free_list[MIGRATE_CMA])) {
3177 return true;
3178 }
3179#endif
3180 if (alloc_harder &&
3181 !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
3182 return true;
3183 }
3184 return false;
3185}
3186
3187bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3188 int classzone_idx, unsigned int alloc_flags)
3189{
3190 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3191 zone_page_state(z, NR_FREE_PAGES));
3192}
3193
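/*
 * Fast path for order-0 watermark checks: if the free pages we are allowed
 * to use are comfortably above the watermark plus the lowmem reserve, skip
 * the full free-list examination done by __zone_watermark_ok().
 */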
3194static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3195 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3196{
3197 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3198 long cma_pages = 0;
3199
3200#ifdef CONFIG_CMA
3201
3202 if (!(alloc_flags & ALLOC_CMA))
3203 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3204#endif
3205
3206
3207
3208
3209
3210
3211
3212
3213 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3214 return true;
3215
3216 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3217 free_pages);
3218}
3219
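/*
 * Like zone_watermark_ok(), but uses a more expensive per-cpu snapshot of
 * NR_FREE_PAGES when the vmstat counter may have drifted too far.
 */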
3220bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3221 unsigned long mark, int classzone_idx)
3222{
3223 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3224
3225 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3226 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3227
3228 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3229 free_pages);
3230}
3231
3232#ifdef CONFIG_NUMA
3233static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3234{
3235 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3236 RECLAIM_DISTANCE;
3237}
3238#else
3239static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3240{
3241 return true;
3242}
3243#endif
3244
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
3249static struct page *
3250get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3251 const struct alloc_context *ac)
3252{
3253 struct zoneref *z = ac->preferred_zoneref;
3254 struct zone *zone;
3255 struct pglist_data *last_pgdat_dirty_limit = NULL;
3256
3257
3258
3259
3260
3261 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3262 ac->nodemask) {
3263 struct page *page;
3264 unsigned long mark;
3265
3266 if (cpusets_enabled() &&
3267 (alloc_flags & ALLOC_CPUSET) &&
3268 !__cpuset_zone_allowed(zone, gfp_mask))
3269 continue;
3270
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a node that is within its dirty
		 * limit, such that no single node holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the node's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * Allocations in the slow path (spread_dirty_pages unset)
		 * may still exceed the per-node dirty limit before going
		 * into reclaim, which matters when the allowed nodes
		 * together are not large enough to reach the global limit.
		 */
3289 if (ac->spread_dirty_pages) {
3290 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3291 continue;
3292
3293 if (!node_dirty_ok(zone->zone_pgdat)) {
3294 last_pgdat_dirty_limit = zone->zone_pgdat;
3295 continue;
3296 }
3297 }
3298
3299 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
3300 if (!zone_watermark_fast(zone, order, mark,
3301 ac_classzone_idx(ac), alloc_flags)) {
3302 int ret;
3303
3304#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3305
3306
3307
3308
3309 if (static_branch_unlikely(&deferred_pages)) {
3310 if (_deferred_grow_zone(zone, order))
3311 goto try_this_zone;
3312 }
3313#endif
3314
3315 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3316 if (alloc_flags & ALLOC_NO_WATERMARKS)
3317 goto try_this_zone;
3318
3319 if (node_reclaim_mode == 0 ||
3320 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3321 continue;
3322
3323 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3324 switch (ret) {
3325 case NODE_RECLAIM_NOSCAN:
3326
3327 continue;
3328 case NODE_RECLAIM_FULL:
3329
3330 continue;
3331 default:
3332
3333 if (zone_watermark_ok(zone, order, mark,
3334 ac_classzone_idx(ac), alloc_flags))
3335 goto try_this_zone;
3336
3337 continue;
3338 }
3339 }
3340
3341try_this_zone:
3342 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3343 gfp_mask, alloc_flags, ac->migratetype);
3344 if (page) {
3345 prep_new_page(page, order, gfp_mask, alloc_flags);
3346
3347
3348
3349
3350
3351 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3352 reserve_highatomic_pageblock(page, zone, order);
3353
3354 return page;
3355 } else {
3356#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3357
3358 if (static_branch_unlikely(&deferred_pages)) {
3359 if (_deferred_grow_zone(zone, order))
3360 goto try_this_zone;
3361 }
3362#endif
3363 }
3364 }
3365
3366 return NULL;
3367}
3368
3369
3370
3371
3372
3373static inline bool should_suppress_show_mem(void)
3374{
3375 bool ret = false;
3376
3377#if NODES_SHIFT > 8
3378 ret = in_interrupt();
3379#endif
3380 return ret;
3381}
3382
3383static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3384{
3385 unsigned int filter = SHOW_MEM_FILTER_NODES;
3386 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3387
3388 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
3389 return;
3390
3391
3392
3393
3394
3395
3396 if (!(gfp_mask & __GFP_NOMEMALLOC))
3397 if (tsk_is_oom_victim(current) ||
3398 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3399 filter &= ~SHOW_MEM_FILTER_NODES;
3400 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3401 filter &= ~SHOW_MEM_FILTER_NODES;
3402
3403 show_mem(filter, nodemask);
3404}
3405
3406void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3407{
3408 struct va_format vaf;
3409 va_list args;
3410 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3411 DEFAULT_RATELIMIT_BURST);
3412
3413 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3414 return;
3415
3416 va_start(args, fmt);
3417 vaf.fmt = fmt;
3418 vaf.va = &args;
3419 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl\n",
3420 current->comm, &vaf, gfp_mask, &gfp_mask,
3421 nodemask_pr_args(nodemask));
3422 va_end(args);
3423
3424 cpuset_print_current_mems_allowed();
3425
3426 dump_stack();
3427 warn_alloc_show_mem(gfp_mask, nodemask);
3428}
3429
3430static inline struct page *
3431__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3432 unsigned int alloc_flags,
3433 const struct alloc_context *ac)
3434{
3435 struct page *page;
3436
3437 page = get_page_from_freelist(gfp_mask, order,
3438 alloc_flags|ALLOC_CPUSET, ac);
3439
3440
3441
3442
3443 if (!page)
3444 page = get_page_from_freelist(gfp_mask, order,
3445 alloc_flags, ac);
3446
3447 return page;
3448}
3449
3450static inline struct page *
3451__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3452 const struct alloc_context *ac, unsigned long *did_some_progress)
3453{
3454 struct oom_control oc = {
3455 .zonelist = ac->zonelist,
3456 .nodemask = ac->nodemask,
3457 .memcg = NULL,
3458 .gfp_mask = gfp_mask,
3459 .order = order,
3460 };
3461 struct page *page;
3462
3463 *did_some_progress = 0;
3464
3465
3466
3467
3468
3469 if (!mutex_trylock(&oom_lock)) {
3470 *did_some_progress = 1;
3471 schedule_timeout_uninterruptible(1);
3472 return NULL;
3473 }
3474
	/*
	 * Go through the zonelist yet one more time, keeping a very high
	 * watermark here.  This is only to catch a parallel oom kill; we
	 * must fail the allocation if we are still under heavy pressure.
	 */
3482 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3483 ~__GFP_DIRECT_RECLAIM, order,
3484 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3485 if (page)
3486 goto out;
3487
3488
3489 if (current->flags & PF_DUMPCORE)
3490 goto out;
3491
3492 if (order > PAGE_ALLOC_COSTLY_ORDER)
3493 goto out;
3494
3495
3496
3497
3498
3499
3500 if (gfp_mask & __GFP_RETRY_MAYFAIL)
3501 goto out;
3502
3503 if (ac->high_zoneidx < ZONE_NORMAL)
3504 goto out;
3505 if (pm_suspended_storage())
3506 goto out;
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518 if (gfp_mask & __GFP_THISNODE)
3519 goto out;
3520
3521
3522 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3523 *did_some_progress = 1;
3524
3525
3526
3527
3528
3529 if (gfp_mask & __GFP_NOFAIL)
3530 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3531 ALLOC_NO_WATERMARKS, ac);
3532 }
3533out:
3534 mutex_unlock(&oom_lock);
3535 return page;
3536}
3537
3538
/*
 * Maximum number of compaction retries with progress before the OOM
 * killer is considered the only way forward.
 */
3542#define MAX_COMPACT_RETRIES 16
3543
3544#ifdef CONFIG_COMPACTION
3545
3546static struct page *
3547__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3548 unsigned int alloc_flags, const struct alloc_context *ac,
3549 enum compact_priority prio, enum compact_result *compact_result)
3550{
3551 struct page *page;
3552 unsigned int noreclaim_flag;
3553
3554 if (!order)
3555 return NULL;
3556
3557 noreclaim_flag = memalloc_noreclaim_save();
3558 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3559 prio);
3560 memalloc_noreclaim_restore(noreclaim_flag);
3561
3562 if (*compact_result <= COMPACT_INACTIVE)
3563 return NULL;
3564
3565
3566
3567
3568
3569 count_vm_event(COMPACTSTALL);
3570
3571 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3572
3573 if (page) {
3574 struct zone *zone = page_zone(page);
3575
3576 zone->compact_blockskip_flush = false;
3577 compaction_defer_reset(zone, order, true);
3578 count_vm_event(COMPACTSUCCESS);
3579 return page;
3580 }
3581
3582
3583
3584
3585
3586 count_vm_event(COMPACTFAIL);
3587
3588 cond_resched();
3589
3590 return NULL;
3591}
3592
3593static inline bool
3594should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3595 enum compact_result compact_result,
3596 enum compact_priority *compact_priority,
3597 int *compaction_retries)
3598{
3599 int max_retries = MAX_COMPACT_RETRIES;
3600 int min_priority;
3601 bool ret = false;
3602 int retries = *compaction_retries;
3603 enum compact_priority priority = *compact_priority;
3604
3605 if (!order)
3606 return false;
3607
3608 if (compaction_made_progress(compact_result))
3609 (*compaction_retries)++;
3610
3611
3612
3613
3614
3615
3616 if (compaction_failed(compact_result))
3617 goto check_priority;
3618
3619
3620
3621
3622
3623
3624
3625 if (compaction_withdrawn(compact_result)) {
3626 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3627 goto out;
3628 }
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638 if (order > PAGE_ALLOC_COSTLY_ORDER)
3639 max_retries /= 4;
3640 if (*compaction_retries <= max_retries) {
3641 ret = true;
3642 goto out;
3643 }
3644
3645
3646
3647
3648
3649check_priority:
3650 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3651 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3652
3653 if (*compact_priority > min_priority) {
3654 (*compact_priority)--;
3655 *compaction_retries = 0;
3656 ret = true;
3657 }
3658out:
3659 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3660 return ret;
3661}
3662#else
3663static inline struct page *
3664__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3665 unsigned int alloc_flags, const struct alloc_context *ac,
3666 enum compact_priority prio, enum compact_result *compact_result)
3667{
3668 *compact_result = COMPACT_SKIPPED;
3669 return NULL;
3670}
3671
3672static inline bool
3673should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3674 enum compact_result compact_result,
3675 enum compact_priority *compact_priority,
3676 int *compaction_retries)
3677{
3678 struct zone *zone;
3679 struct zoneref *z;
3680
3681 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3682 return false;
3683
3684
3685
3686
3687
3688
3689
3690 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3691 ac->nodemask) {
3692 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3693 ac_classzone_idx(ac), alloc_flags))
3694 return true;
3695 }
3696 return false;
3697}
3698#endif
3699
3700#ifdef CONFIG_LOCKDEP
3701static struct lockdep_map __fs_reclaim_map =
3702 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3703
3704static bool __need_fs_reclaim(gfp_t gfp_mask)
3705{
3706 gfp_mask = current_gfp_context(gfp_mask);
3707
3708
3709 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3710 return false;
3711
3712
3713 if (current->flags & PF_MEMALLOC)
3714 return false;
3715
3716
3717 if (!(gfp_mask & __GFP_FS))
3718 return false;
3719
3720 if (gfp_mask & __GFP_NOLOCKDEP)
3721 return false;
3722
3723 return true;
3724}
3725
3726void __fs_reclaim_acquire(void)
3727{
3728 lock_map_acquire(&__fs_reclaim_map);
3729}
3730
3731void __fs_reclaim_release(void)
3732{
3733 lock_map_release(&__fs_reclaim_map);
3734}
3735
3736void fs_reclaim_acquire(gfp_t gfp_mask)
3737{
3738 if (__need_fs_reclaim(gfp_mask))
3739 __fs_reclaim_acquire();
3740}
3741EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3742
3743void fs_reclaim_release(gfp_t gfp_mask)
3744{
3745 if (__need_fs_reclaim(gfp_mask))
3746 __fs_reclaim_release();
3747}
3748EXPORT_SYMBOL_GPL(fs_reclaim_release);
3749#endif
3750
3751
3752static int
3753__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3754 const struct alloc_context *ac)
3755{
3756 struct reclaim_state reclaim_state;
3757 int progress;
3758 unsigned int noreclaim_flag;
3759
3760 cond_resched();
3761
3762
3763 cpuset_memory_pressure_bump();
3764 fs_reclaim_acquire(gfp_mask);
3765 noreclaim_flag = memalloc_noreclaim_save();
3766 reclaim_state.reclaimed_slab = 0;
3767 current->reclaim_state = &reclaim_state;
3768
3769 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3770 ac->nodemask);
3771
3772 current->reclaim_state = NULL;
3773 memalloc_noreclaim_restore(noreclaim_flag);
3774 fs_reclaim_release(gfp_mask);
3775
3776 cond_resched();
3777
3778 return progress;
3779}
3780
3781
3782static inline struct page *
3783__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3784 unsigned int alloc_flags, const struct alloc_context *ac,
3785 unsigned long *did_some_progress)
3786{
3787 struct page *page = NULL;
3788 bool drained = false;
3789
3790 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3791 if (unlikely(!(*did_some_progress)))
3792 return NULL;
3793
3794retry:
3795 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3796
3797
3798
3799
3800
3801
3802 if (!page && !drained) {
3803 unreserve_highatomic_pageblock(ac, false);
3804 drain_all_pages(NULL);
3805 drained = true;
3806 goto retry;
3807 }
3808
3809 return page;
3810}
3811
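/* Wake kswapd once for every node eligible for this allocation context. */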
3812static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3813 const struct alloc_context *ac)
3814{
3815 struct zoneref *z;
3816 struct zone *zone;
3817 pg_data_t *last_pgdat = NULL;
3818 enum zone_type high_zoneidx = ac->high_zoneidx;
3819
3820 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx,
3821 ac->nodemask) {
3822 if (last_pgdat != zone->zone_pgdat)
3823 wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
3824 last_pgdat = zone->zone_pgdat;
3825 }
3826}
3827
3828static inline unsigned int
3829gfp_to_alloc_flags(gfp_t gfp_mask)
3830{
3831 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3832
3833
3834 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
3835
3836
3837
3838
3839
3840
3841
3842 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
3843
3844 if (gfp_mask & __GFP_ATOMIC) {
3845
3846
3847
3848
3849 if (!(gfp_mask & __GFP_NOMEMALLOC))
3850 alloc_flags |= ALLOC_HARDER;
3851
3852
3853
3854
3855 alloc_flags &= ~ALLOC_CPUSET;
3856 } else if (unlikely(rt_task(current)) && !in_interrupt())
3857 alloc_flags |= ALLOC_HARDER;
3858
3859#ifdef CONFIG_CMA
3860 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3861 alloc_flags |= ALLOC_CMA;
3862#endif
3863 return alloc_flags;
3864}
3865
3866static bool oom_reserves_allowed(struct task_struct *tsk)
3867{
3868 if (!tsk_is_oom_victim(tsk))
3869 return false;
3870
3871
3872
3873
3874
3875 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
3876 return false;
3877
3878 return true;
3879}
3880
3881
3882
3883
3884
3885static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3886{
3887 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3888 return 0;
3889 if (gfp_mask & __GFP_MEMALLOC)
3890 return ALLOC_NO_WATERMARKS;
3891 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3892 return ALLOC_NO_WATERMARKS;
3893 if (!in_interrupt()) {
3894 if (current->flags & PF_MEMALLOC)
3895 return ALLOC_NO_WATERMARKS;
3896 else if (oom_reserves_allowed(current))
3897 return ALLOC_OOM;
3898 }
3899
3900 return 0;
3901}
3902
3903bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3904{
3905 return !!__gfp_pfmemalloc_flags(gfp_mask);
3906}
3907
3908
/*
 * Checks whether it makes sense to retry the reclaim to make a forward
 * progress for the given allocation request.
 *
 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
 * without success, or when we couldn't even meet the watermark if we
 * reclaimed all remaining pages on the LRU lists.
 *
 * Returns true if a retry is viable or false to enter the oom path.
 */
3918static inline bool
3919should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3920 struct alloc_context *ac, int alloc_flags,
3921 bool did_some_progress, int *no_progress_loops)
3922{
3923 struct zone *zone;
3924 struct zoneref *z;
3925
3926
3927
3928
3929
3930
3931 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3932 *no_progress_loops = 0;
3933 else
3934 (*no_progress_loops)++;
3935
3936
3937
3938
3939
3940 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3941
3942 return unreserve_highatomic_pageblock(ac, true);
3943 }
3944
3945
3946
3947
3948
3949
3950
3951 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3952 ac->nodemask) {
3953 unsigned long available;
3954 unsigned long reclaimable;
3955 unsigned long min_wmark = min_wmark_pages(zone);
3956 bool wmark;
3957
3958 available = reclaimable = zone_reclaimable_pages(zone);
3959 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3960
3961
3962
3963
3964
3965 wmark = __zone_watermark_ok(zone, order, min_wmark,
3966 ac_classzone_idx(ac), alloc_flags, available);
3967 trace_reclaim_retry_zone(z, order, reclaimable,
3968 available, min_wmark, *no_progress_loops, wmark);
3969 if (wmark) {
3970
3971
3972
3973
3974
3975
3976 if (!did_some_progress) {
3977 unsigned long write_pending;
3978
3979 write_pending = zone_page_state_snapshot(zone,
3980 NR_ZONE_WRITE_PENDING);
3981
3982 if (2 * write_pending > reclaimable) {
3983 congestion_wait(BLK_RW_ASYNC, HZ/10);
3984 return true;
3985 }
3986 }
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997 if (current->flags & PF_WQ_WORKER)
3998 schedule_timeout_uninterruptible(1);
3999 else
4000 cond_resched();
4001
4002 return true;
4003 }
4004 }
4005
4006 return false;
4007}
4008
4009static inline bool
4010check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4011{
	/*
	 * It's possible that cpuset's mems_allowed and the nodemask from
	 * mempolicy don't intersect with each other, which could lead to
	 * premature OOM killer invocation. If that is the case, drop the
	 * mempolicy nodemask and retry with the cpuset constraints only.
	 */
4023 if (cpusets_enabled() && ac->nodemask &&
4024 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4025 ac->nodemask = NULL;
4026 return true;
4027 }
4028
4029
4030
4031
4032
4033
4034
4035
4036 if (read_mems_allowed_retry(cpuset_mems_cookie))
4037 return true;
4038
4039 return false;
4040}
4041
4042static inline struct page *
4043__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4044 struct alloc_context *ac)
4045{
4046 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4047 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4048 struct page *page = NULL;
4049 unsigned int alloc_flags;
4050 unsigned long did_some_progress;
4051 enum compact_priority compact_priority;
4052 enum compact_result compact_result;
4053 int compaction_retries;
4054 int no_progress_loops;
4055 unsigned int cpuset_mems_cookie;
4056 int reserve_flags;
4057
	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
4064 if (order >= MAX_ORDER) {
4065 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4066 return NULL;
4067 }
4068
4069
4070
4071
4072
4073 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4074 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4075 gfp_mask &= ~__GFP_ATOMIC;
4076
4077retry_cpuset:
4078 compaction_retries = 0;
4079 no_progress_loops = 0;
4080 compact_priority = DEF_COMPACT_PRIORITY;
4081 cpuset_mems_cookie = read_mems_allowed_begin();
4082
	/*
	 * The fast path uses conservative alloc_flags to succeed only until
	 * kswapd needs to be woken up, and to avoid the cost of setting up
	 * alloc_flags precisely. So we do that now.
	 */
4088 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4089
	/*
	 * We need to recalculate the starting point for the zonelist iterator
	 * because we might have used a different nodemask in the fast path,
	 * or there was a cpuset modification and we are retrying - otherwise
	 * we could end up iterating over non-eligible zones endlessly.
	 */
4096 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4097 ac->high_zoneidx, ac->nodemask);
4098 if (!ac->preferred_zoneref->zone)
4099 goto nopage;
4100
4101 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4102 wake_all_kswapds(order, gfp_mask, ac);
4103
4104
4105
4106
4107
4108 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4109 if (page)
4110 goto got_pg;
4111
	/*
	 * For costly allocations, try direct compaction first, as it's likely
	 * that we have enough base pages and don't need to reclaim. For non-
	 * movable high-order allocations, do that as well, as compaction will
	 * try to prevent permanent fragmentation by migrating from blocks of
	 * the same migratetype.
	 * Don't try this for allocations that are allowed to ignore
	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
	 */
4121 if (can_direct_reclaim &&
4122 (costly_order ||
4123 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4124 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4125 page = __alloc_pages_direct_compact(gfp_mask, order,
4126 alloc_flags, ac,
4127 INIT_COMPACT_PRIORITY,
4128 &compact_result);
4129 if (page)
4130 goto got_pg;
4131
4132
4133
4134
4135
4136 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4137
4138
4139
4140
4141
4142
4143
4144
4145 if (compact_result == COMPACT_DEFERRED)
4146 goto nopage;
4147
4148
4149
4150
4151
4152
4153 compact_priority = INIT_COMPACT_PRIORITY;
4154 }
4155 }
4156
4157retry:
4158
4159 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4160 wake_all_kswapds(order, gfp_mask, ac);
4161
4162 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4163 if (reserve_flags)
4164 alloc_flags = reserve_flags;
4165
	/*
	 * Reset the nodemask and zonelist iterators if memory policies can be
	 * ignored. These allocations are high priority and system rather than
	 * user oriented.
	 */
4171 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4172 ac->nodemask = NULL;
4173 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4174 ac->high_zoneidx, ac->nodemask);
4175 }
4176
4177
4178 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4179 if (page)
4180 goto got_pg;
4181
4182
4183 if (!can_direct_reclaim)
4184 goto nopage;
4185
4186
4187 if (current->flags & PF_MEMALLOC)
4188 goto nopage;
4189
4190
4191 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4192 &did_some_progress);
4193 if (page)
4194 goto got_pg;
4195
4196
4197 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4198 compact_priority, &compact_result);
4199 if (page)
4200 goto got_pg;
4201
4202
4203 if (gfp_mask & __GFP_NORETRY)
4204 goto nopage;
4205
4206
4207
4208
4209
4210 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4211 goto nopage;
4212
4213 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4214 did_some_progress > 0, &no_progress_loops))
4215 goto retry;
4216
4217
4218
4219
4220
4221
4222
4223 if (did_some_progress > 0 &&
4224 should_compact_retry(ac, order, alloc_flags,
4225 compact_result, &compact_priority,
4226 &compaction_retries))
4227 goto retry;
4228
4229
4230
4231 if (check_retry_cpuset(cpuset_mems_cookie, ac))
4232 goto retry_cpuset;
4233
4234
4235 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4236 if (page)
4237 goto got_pg;
4238
4239
4240 if (tsk_is_oom_victim(current) &&
4241 (alloc_flags == ALLOC_OOM ||
4242 (gfp_mask & __GFP_NOMEMALLOC)))
4243 goto nopage;
4244
4245
4246 if (did_some_progress) {
4247 no_progress_loops = 0;
4248 goto retry;
4249 }
4250
4251nopage:
4252
4253 if (check_retry_cpuset(cpuset_mems_cookie, ac))
4254 goto retry_cpuset;
4255
4256
4257
4258
4259
4260 if (gfp_mask & __GFP_NOFAIL) {
4261
4262
4263
4264
4265 if (WARN_ON_ONCE(!can_direct_reclaim))
4266 goto fail;
4267
4268
4269
4270
4271
4272
4273 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4274
4275
4276
4277
4278
4279
4280
4281 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4282
		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves but do not use ALLOC_NO_WATERMARKS because this
		 * could deplete whole memory reserves which would just make
		 * the situation worse
		 */
4289 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4290 if (page)
4291 goto got_pg;
4292
4293 cond_resched();
4294 goto retry;
4295 }
4296fail:
4297 warn_alloc(gfp_mask, ac->nodemask,
4298 "page allocation failure: order:%u", order);
4299got_pg:
4300 return page;
4301}
4302
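/*
 * Fill in the alloc_context from the gfp mask, preferred node and nodemask,
 * apply cpuset constraints, and run the fault-injection and reclaim-lockdep
 * checks that precede every allocation.  Returns false if the allocation
 * should fail immediately.
 */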
4303static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4304 int preferred_nid, nodemask_t *nodemask,
4305 struct alloc_context *ac, gfp_t *alloc_mask,
4306 unsigned int *alloc_flags)
4307{
4308 ac->high_zoneidx = gfp_zone(gfp_mask);
4309 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4310 ac->nodemask = nodemask;
4311 ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4312
4313 if (cpusets_enabled()) {
4314 *alloc_mask |= __GFP_HARDWALL;
4315 if (!ac->nodemask)
4316 ac->nodemask = &cpuset_current_mems_allowed;
4317 else
4318 *alloc_flags |= ALLOC_CPUSET;
4319 }
4320
4321 fs_reclaim_acquire(gfp_mask);
4322 fs_reclaim_release(gfp_mask);
4323
4324 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4325
4326 if (should_fail_alloc_page(gfp_mask, order))
4327 return false;
4328
4329 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4330 *alloc_flags |= ALLOC_CMA;
4331
4332 return true;
4333}
4334
4335
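/*
 * Determine whether to spread dirty pages and find the first eligible zone
 * in the zonelist; this is the starting point for the fast path.
 */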
4336static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
4337{
4338
4339 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4340
4341
4342
4343
4344
4345
4346 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4347 ac->high_zoneidx, ac->nodemask);
4348}
4349
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
4353struct page *
4354__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4355 nodemask_t *nodemask)
4356{
4357 struct page *page;
4358 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4359 gfp_t alloc_mask;
4360 struct alloc_context ac = { };
4361
4362 gfp_mask &= gfp_allowed_mask;
4363 alloc_mask = gfp_mask;
4364 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4365 return NULL;
4366
4367 finalise_ac(gfp_mask, &ac);
4368
4369
4370 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4371 if (likely(page))
4372 goto out;
4373
	/*
	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
	 * resp. GFP_NOIO which has to be inherited for all allocation requests
	 * from a particular context which has been marked by
	 * memalloc_no{fs,io}_{save,restore}.
	 */
4380 alloc_mask = current_gfp_context(gfp_mask);
4381 ac.spread_dirty_pages = false;
4382
4383
4384
4385
4386
4387 if (unlikely(ac.nodemask != nodemask))
4388 ac.nodemask = nodemask;
4389
4390 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4391
4392out:
4393 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4394 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4395 __free_pages(page, order);
4396 page = NULL;
4397 }
4398
4399 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4400
4401 return page;
4402}
4403EXPORT_SYMBOL(__alloc_pages_nodemask);
4404
4405
/*
 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
 * address cannot represent highmem pages. Use alloc_pages and then kmap if
 * you need to access high mem.
 */
4410unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4411{
4412 struct page *page;
4413
4414 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4415 if (!page)
4416 return 0;
4417 return (unsigned long) page_address(page);
4418}
4419EXPORT_SYMBOL(__get_free_pages);
4420
4421unsigned long get_zeroed_page(gfp_t gfp_mask)
4422{
4423 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4424}
4425EXPORT_SYMBOL(get_zeroed_page);
4426
4427void __free_pages(struct page *page, unsigned int order)
4428{
4429 if (put_page_testzero(page)) {
4430 if (order == 0)
4431 free_unref_page(page);
4432 else
4433 __free_pages_ok(page, order);
4434 }
4435}
4436
4437EXPORT_SYMBOL(__free_pages);
4438
4439void free_pages(unsigned long addr, unsigned int order)
4440{
4441 if (addr != 0) {
4442 VM_BUG_ON(!virt_addr_valid((void *)addr));
4443 __free_pages(virt_to_page((void *)addr), order);
4444 }
4445}
4446
4447EXPORT_SYMBOL(free_pages);
4448
4449
/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page.  Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simpler allocation framework for
 * page fragments.  This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
4460static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4461 gfp_t gfp_mask)
4462{
4463 struct page *page = NULL;
4464 gfp_t gfp = gfp_mask;
4465
4466#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4467 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4468 __GFP_NOMEMALLOC;
4469 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4470 PAGE_FRAG_CACHE_MAX_ORDER);
4471 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4472#endif
4473 if (unlikely(!page))
4474 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4475
4476 nc->va = page ? page_address(page) : NULL;
4477
4478 return page;
4479}
4480
4481void __page_frag_cache_drain(struct page *page, unsigned int count)
4482{
4483 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4484
4485 if (page_ref_sub_and_test(page, count)) {
4486 unsigned int order = compound_order(page);
4487
4488 if (order == 0)
4489 free_unref_page(page);
4490 else
4491 __free_pages_ok(page, order);
4492 }
4493}
4494EXPORT_SYMBOL(__page_frag_cache_drain);
4495
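/*
 * Allocate a fragment of @fragsz bytes from the per-caller fragment cache,
 * refilling the cache from the page allocator when the current page is
 * exhausted.  Fragments share the backing page's reference count through
 * pagecnt_bias.
 */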
4496void *page_frag_alloc(struct page_frag_cache *nc,
4497 unsigned int fragsz, gfp_t gfp_mask)
4498{
4499 unsigned int size = PAGE_SIZE;
4500 struct page *page;
4501 int offset;
4502
4503 if (unlikely(!nc->va)) {
4504refill:
4505 page = __page_frag_cache_refill(nc, gfp_mask);
4506 if (!page)
4507 return NULL;
4508
4509#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4510
4511 size = nc->size;
4512#endif
4513
4514
4515
4516 page_ref_add(page, size - 1);
4517
4518
4519 nc->pfmemalloc = page_is_pfmemalloc(page);
4520 nc->pagecnt_bias = size;
4521 nc->offset = size;
4522 }
4523
4524 offset = nc->offset - fragsz;
4525 if (unlikely(offset < 0)) {
4526 page = virt_to_page(nc->va);
4527
4528 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4529 goto refill;
4530
4531#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4532
4533 size = nc->size;
4534#endif
4535
4536 set_page_count(page, size);
4537
4538
4539 nc->pagecnt_bias = size;
4540 offset = size - fragsz;
4541 }
4542
4543 nc->pagecnt_bias--;
4544 nc->offset = offset;
4545
4546 return nc->va + offset;
4547}
4548EXPORT_SYMBOL(page_frag_alloc);
4549
4550
4551
4552
4553void page_frag_free(void *addr)
4554{
4555 struct page *page = virt_to_head_page(addr);
4556
4557 if (unlikely(put_page_testzero(page)))
4558 __free_pages_ok(page, compound_order(page));
4559}
4560EXPORT_SYMBOL(page_frag_free);
4561
4562static void *make_alloc_exact(unsigned long addr, unsigned int order,
4563 size_t size)
4564{
4565 if (addr) {
4566 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4567 unsigned long used = addr + PAGE_ALIGN(size);
4568
4569 split_page(virt_to_page((void *)addr), order);
4570 while (used < alloc_end) {
4571 free_page(used);
4572 used += PAGE_SIZE;
4573 }
4574 }
4575 return (void *)addr;
4576}
4577
/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
4591void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4592{
4593 unsigned int order = get_order(size);
4594 unsigned long addr;
4595
4596 addr = __get_free_pages(gfp_mask, order);
4597 return make_alloc_exact(addr, order, size);
4598}
4599EXPORT_SYMBOL(alloc_pages_exact);
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4612{
4613 unsigned int order = get_order(size);
4614 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4615 if (!p)
4616 return NULL;
4617 return make_alloc_exact((unsigned long)page_address(p), order, size);
4618}
4619
4620
/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
4627void free_pages_exact(void *virt, size_t size)
4628{
4629 unsigned long addr = (unsigned long)virt;
4630 unsigned long end = addr + PAGE_ALIGN(size);
4631
4632 while (addr < end) {
4633 free_page(addr);
4634 addr += PAGE_SIZE;
4635 }
4636}
4637EXPORT_SYMBOL(free_pages_exact);
4638
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 */
4649static unsigned long nr_free_zone_pages(int offset)
4650{
4651 struct zoneref *z;
4652 struct zone *zone;
4653
4654
4655 unsigned long sum = 0;
4656
4657 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4658
4659 for_each_zone_zonelist(zone, z, zonelist, offset) {
4660 unsigned long size = zone->managed_pages;
4661 unsigned long high = high_wmark_pages(zone);
4662 if (size > high)
4663 sum += size - high;
4664 }
4665
4666 return sum;
4667}
4668
4669
4670
4671
4672
4673
4674
4675unsigned long nr_free_buffer_pages(void)
4676{
4677 return nr_free_zone_pages(gfp_zone(GFP_USER));
4678}
4679EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4680
4681
4682
4683
4684
4685
4686
4687unsigned long nr_free_pagecache_pages(void)
4688{
4689 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4690}
4691
4692static inline void show_node(struct zone *zone)
4693{
4694 if (IS_ENABLED(CONFIG_NUMA))
4695 printk("Node %d ", zone_to_nid(zone));
4696}
4697
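/*
 * Estimate how much memory is available for a new workload without pushing
 * the system into swap: free pages above the watermarks plus the reclaimable
 * portions of the page cache, slab and other indirectly reclaimable memory.
 */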
4698long si_mem_available(void)
4699{
4700 long available;
4701 unsigned long pagecache;
4702 unsigned long wmark_low = 0;
4703 unsigned long pages[NR_LRU_LISTS];
4704 struct zone *zone;
4705 int lru;
4706
4707 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4708 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4709
4710 for_each_zone(zone)
4711 wmark_low += zone->watermark[WMARK_LOW];
4712
4713
4714
4715
4716
4717 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
4718
4719
4720
4721
4722
4723
4724 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4725 pagecache -= min(pagecache / 2, wmark_low);
4726 available += pagecache;
4727
4728
4729
4730
4731
4732 available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
4733 min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
4734 wmark_low);
4735
4736
4737
4738
4739
4740 available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
4741 PAGE_SHIFT;
4742
4743 if (available < 0)
4744 available = 0;
4745 return available;
4746}
4747EXPORT_SYMBOL_GPL(si_mem_available);
4748
4749void si_meminfo(struct sysinfo *val)
4750{
4751 val->totalram = totalram_pages;
4752 val->sharedram = global_node_page_state(NR_SHMEM);
4753 val->freeram = global_zone_page_state(NR_FREE_PAGES);
4754 val->bufferram = nr_blockdev_pages();
4755 val->totalhigh = totalhigh_pages;
4756 val->freehigh = nr_free_highpages();
4757 val->mem_unit = PAGE_SIZE;
4758}
4759
4760EXPORT_SYMBOL(si_meminfo);
4761
4762#ifdef CONFIG_NUMA
4763void si_meminfo_node(struct sysinfo *val, int nid)
4764{
4765 int zone_type;
4766 unsigned long managed_pages = 0;
4767 unsigned long managed_highpages = 0;
4768 unsigned long free_highpages = 0;
4769 pg_data_t *pgdat = NODE_DATA(nid);
4770
4771 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4772 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4773 val->totalram = managed_pages;
4774 val->sharedram = node_page_state(pgdat, NR_SHMEM);
4775 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4776#ifdef CONFIG_HIGHMEM
4777 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4778 struct zone *zone = &pgdat->node_zones[zone_type];
4779
4780 if (is_highmem(zone)) {
4781 managed_highpages += zone->managed_pages;
4782 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4783 }
4784 }
4785 val->totalhigh = managed_highpages;
4786 val->freehigh = free_highpages;
4787#else
4788 val->totalhigh = managed_highpages;
4789 val->freehigh = free_highpages;
4790#endif
4791 val->mem_unit = PAGE_SIZE;
4792}
4793#endif
4794
4795
4796
4797
4798
4799static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4800{
4801 if (!(flags & SHOW_MEM_FILTER_NODES))
4802 return false;
4803
4804
4805
4806
4807
4808
4809 if (!nodemask)
4810 nodemask = &cpuset_current_mems_allowed;
4811
4812 return !node_isset(nid, *nodemask);
4813}
4814
4815#define K(x) ((x) << (PAGE_SHIFT-10))
4816
4817static void show_migration_types(unsigned char type)
4818{
4819 static const char types[MIGRATE_TYPES] = {
4820 [MIGRATE_UNMOVABLE] = 'U',
4821 [MIGRATE_MOVABLE] = 'M',
4822 [MIGRATE_RECLAIMABLE] = 'E',
4823 [MIGRATE_HIGHATOMIC] = 'H',
4824#ifdef CONFIG_CMA
4825 [MIGRATE_CMA] = 'C',
4826#endif
4827#ifdef CONFIG_MEMORY_ISOLATION
4828 [MIGRATE_ISOLATE] = 'I',
4829#endif
4830 };
4831 char tmp[MIGRATE_TYPES + 1];
4832 char *p = tmp;
4833 int i;
4834
4835 for (i = 0; i < MIGRATE_TYPES; i++) {
4836 if (type & (1 << i))
4837 *p++ = types[i];
4838 }
4839
4840 *p = '\0';
4841 printk(KERN_CONT "(%s) ", tmp);
4842}
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4854{
4855 unsigned long free_pcp = 0;
4856 int cpu;
4857 struct zone *zone;
4858 pg_data_t *pgdat;
4859
4860 for_each_populated_zone(zone) {
4861 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4862 continue;
4863
4864 for_each_online_cpu(cpu)
4865 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4866 }
4867
4868 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4869 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
4870 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4871 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4872 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4873 " free:%lu free_pcp:%lu free_cma:%lu\n",
4874 global_node_page_state(NR_ACTIVE_ANON),
4875 global_node_page_state(NR_INACTIVE_ANON),
4876 global_node_page_state(NR_ISOLATED_ANON),
4877 global_node_page_state(NR_ACTIVE_FILE),
4878 global_node_page_state(NR_INACTIVE_FILE),
4879 global_node_page_state(NR_ISOLATED_FILE),
4880 global_node_page_state(NR_UNEVICTABLE),
4881 global_node_page_state(NR_FILE_DIRTY),
4882 global_node_page_state(NR_WRITEBACK),
4883 global_node_page_state(NR_UNSTABLE_NFS),
4884 global_node_page_state(NR_SLAB_RECLAIMABLE),
4885 global_node_page_state(NR_SLAB_UNRECLAIMABLE),
4886 global_node_page_state(NR_FILE_MAPPED),
4887 global_node_page_state(NR_SHMEM),
4888 global_zone_page_state(NR_PAGETABLE),
4889 global_zone_page_state(NR_BOUNCE),
4890 global_zone_page_state(NR_FREE_PAGES),
4891 free_pcp,
4892 global_zone_page_state(NR_FREE_CMA_PAGES));
4893
4894 for_each_online_pgdat(pgdat) {
4895 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
4896 continue;
4897
4898 printk("Node %d"
4899 " active_anon:%lukB"
4900 " inactive_anon:%lukB"
4901 " active_file:%lukB"
4902 " inactive_file:%lukB"
4903 " unevictable:%lukB"
4904 " isolated(anon):%lukB"
4905 " isolated(file):%lukB"
4906 " mapped:%lukB"
4907 " dirty:%lukB"
4908 " writeback:%lukB"
4909 " shmem:%lukB"
4910#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4911 " shmem_thp: %lukB"
4912 " shmem_pmdmapped: %lukB"
4913 " anon_thp: %lukB"
4914#endif
4915 " writeback_tmp:%lukB"
4916 " unstable:%lukB"
4917 " all_unreclaimable? %s"
4918 "\n",
4919 pgdat->node_id,
4920 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4921 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4922 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4923 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4924 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4925 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4926 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
4927 K(node_page_state(pgdat, NR_FILE_MAPPED)),
4928 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4929 K(node_page_state(pgdat, NR_WRITEBACK)),
4930 K(node_page_state(pgdat, NR_SHMEM)),
4931#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4932 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4933 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4934 * HPAGE_PMD_NR),
4935 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4936#endif
4937 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4938 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4939 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4940 "yes" : "no");
4941 }
4942
4943 for_each_populated_zone(zone) {
4944 int i;
4945
4946 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4947 continue;
4948
4949 free_pcp = 0;
4950 for_each_online_cpu(cpu)
4951 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4952
4953 show_node(zone);
4954 printk(KERN_CONT
4955 "%s"
4956 " free:%lukB"
4957 " min:%lukB"
4958 " low:%lukB"
4959 " high:%lukB"
4960 " active_anon:%lukB"
4961 " inactive_anon:%lukB"
4962 " active_file:%lukB"
4963 " inactive_file:%lukB"
4964 " unevictable:%lukB"
4965 " writepending:%lukB"
4966 " present:%lukB"
4967 " managed:%lukB"
4968 " mlocked:%lukB"
4969 " kernel_stack:%lukB"
4970 " pagetables:%lukB"
4971 " bounce:%lukB"
4972 " free_pcp:%lukB"
4973 " local_pcp:%ukB"
4974 " free_cma:%lukB"
4975 "\n",
4976 zone->name,
4977 K(zone_page_state(zone, NR_FREE_PAGES)),
4978 K(min_wmark_pages(zone)),
4979 K(low_wmark_pages(zone)),
4980 K(high_wmark_pages(zone)),
4981 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4982 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4983 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4984 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4985 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
4986 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
4987 K(zone->present_pages),
4988 K(zone->managed_pages),
4989 K(zone_page_state(zone, NR_MLOCK)),
4990 zone_page_state(zone, NR_KERNEL_STACK_KB),
4991 K(zone_page_state(zone, NR_PAGETABLE)),
4992 K(zone_page_state(zone, NR_BOUNCE)),
4993 K(free_pcp),
4994 K(this_cpu_read(zone->pageset->pcp.count)),
4995 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
4996 printk("lowmem_reserve[]:");
4997 for (i = 0; i < MAX_NR_ZONES; i++)
4998 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4999 printk(KERN_CONT "\n");
5000 }
5001
5002 for_each_populated_zone(zone) {
5003 unsigned int order;
5004 unsigned long nr[MAX_ORDER], flags, total = 0;
5005 unsigned char types[MAX_ORDER];
5006
5007 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5008 continue;
5009 show_node(zone);
5010 printk(KERN_CONT "%s: ", zone->name);
5011
5012 spin_lock_irqsave(&zone->lock, flags);
5013 for (order = 0; order < MAX_ORDER; order++) {
5014 struct free_area *area = &zone->free_area[order];
5015 int type;
5016
5017 nr[order] = area->nr_free;
5018 total += nr[order] << order;
5019
5020 types[order] = 0;
5021 for (type = 0; type < MIGRATE_TYPES; type++) {
5022 if (!list_empty(&area->free_list[type]))
5023 types[order] |= 1 << type;
5024 }
5025 }
5026 spin_unlock_irqrestore(&zone->lock, flags);
5027 for (order = 0; order < MAX_ORDER; order++) {
5028 printk(KERN_CONT "%lu*%lukB ",
5029 nr[order], K(1UL) << order);
5030 if (nr[order])
5031 show_migration_types(types[order]);
5032 }
5033 printk(KERN_CONT "= %lukB\n", K(total));
5034 }
5035
5036 hugetlb_show_meminfo();
5037
5038 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5039
5040 show_swap_cache_info();
5041}
5042
5043static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5044{
5045 zoneref->zone = zone;
5046 zoneref->zone_idx = zone_idx(zone);
5047}
5048
5049
5050
5051
5052
5053
5054static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5055{
5056 struct zone *zone;
5057 enum zone_type zone_type = MAX_NR_ZONES;
5058 int nr_zones = 0;
5059
5060 do {
5061 zone_type--;
5062 zone = pgdat->node_zones + zone_type;
5063 if (managed_zone(zone)) {
5064 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5065 check_highest_zone(zone_type);
5066 }
5067 } while (zone_type);
5068
5069 return nr_zones;
5070}
5071
5072#ifdef CONFIG_NUMA
5073
5074static int __parse_numa_zonelist_order(char *s)
5075{
5076
5077
5078
5079
5080
5081
5082 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5083 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
5084 return -EINVAL;
5085 }
5086 return 0;
5087}
5088
5089static __init int setup_numa_zonelist_order(char *s)
5090{
5091 if (!s)
5092 return 0;
5093
5094 return __parse_numa_zonelist_order(s);
5095}
5096early_param("numa_zonelist_order", setup_numa_zonelist_order);
5097
5098char numa_zonelist_order[] = "Node";
5099
5100
5101
5102
5103int numa_zonelist_order_handler(struct ctl_table *table, int write,
5104 void __user *buffer, size_t *length,
5105 loff_t *ppos)
5106{
5107 char *str;
5108 int ret;
5109
5110 if (!write)
5111 return proc_dostring(table, write, buffer, length, ppos);
5112 str = memdup_user_nul(buffer, 16);
5113 if (IS_ERR(str))
5114 return PTR_ERR(str);
5115
5116 ret = __parse_numa_zonelist_order(str);
5117 kfree(str);
5118 return ret;
5119}
5120
5121
5122#define MAX_NODE_LOAD (nr_online_nodes)
5123static int node_load[MAX_NUMNODES];
5124
5125
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns NUMA_NO_NODE if no further node is found.
 */
5139static int find_next_best_node(int node, nodemask_t *used_node_mask)
5140{
5141 int n, val;
5142 int min_val = INT_MAX;
5143 int best_node = NUMA_NO_NODE;
5144 const struct cpumask *tmp = cpumask_of_node(0);
5145
5146
5147 if (!node_isset(node, *used_node_mask)) {
5148 node_set(node, *used_node_mask);
5149 return node;
5150 }
5151
5152 for_each_node_state(n, N_MEMORY) {
5153
5154
5155 if (node_isset(n, *used_node_mask))
5156 continue;
5157
5158
5159 val = node_distance(node, n);
5160
5161
5162 val += (n < node);
5163
5164
5165 tmp = cpumask_of_node(n);
5166 if (!cpumask_empty(tmp))
5167 val += PENALTY_FOR_NODE_WITH_CPUS;
5168
5169
5170 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5171 val += node_load[n];
5172
5173 if (val < min_val) {
5174 min_val = val;
5175 best_node = n;
5176 }
5177 }
5178
5179 if (best_node >= 0)
5180 node_set(best_node, *used_node_mask);
5181
5182 return best_node;
5183}
5184
5185
/*
 * Build the fallback zonelist for the pgdat, visiting the given nodes in
 * the order supplied in node_order[] and appending each node's managed
 * zones, highest zone first.
 */
5191static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5192 unsigned nr_nodes)
5193{
5194 struct zoneref *zonerefs;
5195 int i;
5196
5197 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5198
5199 for (i = 0; i < nr_nodes; i++) {
5200 int nr_zones;
5201
5202 pg_data_t *node = NODE_DATA(node_order[i]);
5203
5204 nr_zones = build_zonerefs_node(node, zonerefs);
5205 zonerefs += nr_zones;
5206 }
5207 zonerefs->zone = NULL;
5208 zonerefs->zone_idx = 0;
5209}
5210
/*
 * Build gfp_thisnode zonelists
 */
5214static void build_thisnode_zonelists(pg_data_t *pgdat)
5215{
5216 struct zoneref *zonerefs;
5217 int nr_zones;
5218
5219 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5220 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5221 zonerefs += nr_zones;
5222 zonerefs->zone = NULL;
5223 zonerefs->zone_idx = 0;
5224}
5225
/*
 * Build the zonelists for this node, ordered by distance from the
 * local node: nearest nodes first, with node_load used to round-robin
 * nodes that sit at the same distance.
 */
5233static void build_zonelists(pg_data_t *pgdat)
5234{
5235 static int node_order[MAX_NUMNODES];
5236 int node, load, nr_nodes = 0;
5237 nodemask_t used_mask;
5238 int local_node, prev_node;
5239
	/* NUMA-aware ordering of nodes */
5241 local_node = pgdat->node_id;
5242 load = nr_online_nodes;
5243 prev_node = local_node;
5244 nodes_clear(used_mask);
5245
5246 memset(node_order, 0, sizeof(node_order));
5247 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
5253 if (node_distance(local_node, node) !=
5254 node_distance(local_node, prev_node))
5255 node_load[node] = load;
5256
5257 node_order[nr_nodes++] = node;
5258 prev_node = node;
5259 load--;
5260 }
5261
5262 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5263 build_thisnode_zonelists(pgdat);
5264}
5265
5266#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
5273int local_memory_node(int node)
5274{
5275 struct zoneref *z;
5276
5277 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5278 gfp_zone(GFP_KERNEL),
5279 NULL);
5280 return zone_to_nid(z->zone);
5281}
5282#endif
5283
5284static void setup_min_unmapped_ratio(void);
5285static void setup_min_slab_ratio(void);
#else	/* CONFIG_NUMA */
5287
5288static void build_zonelists(pg_data_t *pgdat)
5289{
5290 int node, local_node;
5291 struct zoneref *zonerefs;
5292 int nr_zones;
5293
5294 local_node = pgdat->node_id;
5295
5296 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5297 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5298 zonerefs += nr_zones;
5299
	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
5308 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5309 if (!node_online(node))
5310 continue;
5311 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5312 zonerefs += nr_zones;
5313 }
5314 for (node = 0; node < local_node; node++) {
5315 if (!node_online(node))
5316 continue;
5317 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5318 zonerefs += nr_zones;
5319 }
5320
5321 zonerefs->zone = NULL;
5322 zonerefs->zone_idx = 0;
5323}
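
/*
 * Illustrative example (not from the original source): with nodes 0..3
 * online on a !CONFIG_NUMA build, the fallback zonelist built above for
 * local_node == 2 visits nodes in the order 2, 3, 0, 1.
 */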
5324
#endif	/* CONFIG_NUMA */
5326
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
5342static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5343static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5344static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5345
5346static void __build_all_zonelists(void *data)
5347{
5348 int nid;
5349 int __maybe_unused cpu;
5350 pg_data_t *self = data;
5351 static DEFINE_SPINLOCK(lock);
5352
5353 spin_lock(&lock);
5354
5355#ifdef CONFIG_NUMA
5356 memset(node_load, 0, sizeof(node_load));
5357#endif
5358
	/*
	 * This node is hotadded and no memory is yet present.   So just
	 * building zonelists is fine - no need to touch other nodes.
	 */
5363 if (self && !node_online(self->node_id)) {
5364 build_zonelists(self);
5365 } else {
5366 for_each_online_node(nid) {
5367 pg_data_t *pgdat = NODE_DATA(nid);
5368
5369 build_zonelists(pgdat);
5370 }
5371
5372#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus.  During
		 * boot, only the boot cpu should be on-line;  we'll init the
		 * secondary cpus' numa_mem as they come on-line.  During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
5381 for_each_online_cpu(cpu)
5382 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5383#endif
5384 }
5385
5386 spin_unlock(&lock);
5387}
5388
5389static noinline void __init
5390build_all_zonelists_init(void)
5391{
5392 int cpu;
5393
5394 __build_all_zonelists(NULL);
5395
	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are used also for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * F.e. the percpu allocator needs the cpu offline code to be
	 * able to process a request. That means the request will never
	 * be dropped on the floor.
	 */
5409 for_each_possible_cpu(cpu)
5410 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5411
5412 mminit_verify_zonelist();
5413 cpuset_init_current_mems_allowed();
5414}
5415

/*
 * Build all zonelists; may be called for a hotadded node (pgdat != NULL)
 * or for the whole system (pgdat == NULL).
 *
 * __ref because it calls the __init annotated helper
 * build_all_zonelists_init(), which is only reached while
 * system_state == SYSTEM_BOOTING.
 */
5422void __ref build_all_zonelists(pg_data_t *pgdat)
5423{
5424 if (system_state == SYSTEM_BOOTING) {
5425 build_all_zonelists_init();
5426 } else {
5427 __build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
5429 }
5430 vm_total_pages = nr_free_pagecache_pages();

	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later
	 */
5438 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5439 page_group_by_mobility_disabled = 1;
5440 else
5441 page_group_by_mobility_disabled = 0;
5442
5443 pr_info("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
5444 nr_online_nodes,
5445 page_group_by_mobility_disabled ? "off" : "on",
5446 vm_total_pages);
5447#ifdef CONFIG_NUMA
5448 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5449#endif
5450}
5451
/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
5457void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5458 unsigned long start_pfn, enum memmap_context context,
5459 struct vmem_altmap *altmap)
5460{
5461 unsigned long end_pfn = start_pfn + size;
5462 pg_data_t *pgdat = NODE_DATA(nid);
5463 unsigned long pfn;
5464 unsigned long nr_initialised = 0;
5465 struct page *page;
5466#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5467 struct memblock_region *r = NULL, *tmp;
5468#endif
5469
5470 if (highest_memmap_pfn < end_pfn - 1)
5471 highest_memmap_pfn = end_pfn - 1;
5472
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory
	 */
5477 if (altmap && start_pfn == altmap->base_pfn)
5478 start_pfn += altmap->reserve;
5479
5480 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function.  They do not exist on hotplugged memory.
		 */
5485 if (context != MEMMAP_EARLY)
5486 goto not_early;
5487
5488 if (!early_pfn_valid(pfn))
5489 continue;
5490 if (!early_pfn_in_nid(pfn, nid))
5491 continue;
5492 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5493 break;
5494
5495#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		/*
		 * Check given memblock attribute by firmware which can affect
		 * kernel memory layout.  If zone==ZONE_MOVABLE but memory is
		 * mirrored, it's an overlapped memmap init. skip it.
		 */
5501 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5502 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5503 for_each_memblock(memory, tmp)
5504 if (pfn < memblock_region_memory_end_pfn(tmp))
5505 break;
5506 r = tmp;
5507 }
5508 if (pfn >= memblock_region_memory_base_pfn(r) &&
5509 memblock_is_mirror(r)) {
				/* already initialized as NORMAL */
5511 pfn = memblock_region_memory_end_pfn(r);
5512 continue;
5513 }
5514 }
5515#endif
5516
5517not_early:
5518 page = pfn_to_page(pfn);
5519 __init_single_page(page, pfn, zone, nid);
5520 if (context == MEMMAP_HOTPLUG)
5521 SetPageReserved(page);
5522
		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made.
		 *
		 * The bitmap is created for the zone's valid pfn range,
		 * but memmap can be created for invalid pages (for
		 * alignment); check here so that we do not call
		 * set_pageblock_migratetype() against a pfn out of zone.
		 *
		 * Please note that the MEMMAP_HOTPLUG path doesn't clear
		 * the memmap because this is done early in
		 * sparse_add_one_section().
		 */
5538 if (!(pfn & (pageblock_nr_pages - 1))) {
5539 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5540 cond_resched();
5541 }
5542 }
5543}
5544
5545static void __meminit zone_init_free_lists(struct zone *zone)
5546{
5547 unsigned int order, t;
5548 for_each_migratetype_order(order, t) {
5549 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5550 zone->free_area[order].nr_free = 0;
5551 }
5552}
5553
5554#ifndef __HAVE_ARCH_MEMMAP_INIT
5555#define memmap_init(size, nid, zone, start_pfn) \
5556 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
5557#endif
5558
5559static int zone_batchsize(struct zone *zone)
5560{
5561#ifdef CONFIG_MMU
5562 int batch;
5563
	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone, but no more than a meg at once.
	 */
5568 batch = zone->managed_pages / 1024;
5569
5570 if (batch * PAGE_SIZE > 1024 * 1024)
5571 batch = (1024 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
5573 if (batch < 1)
5574 batch = 1;
5575
	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
5586 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5587
5588 return batch;
5589
5590#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
5604 return 0;
5605#endif
5606}
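
/*
 * Worked example (illustrative, assuming 4KiB pages): a zone with 1GiB
 * managed has managed_pages = 262144, so batch starts at 256; 256 pages
 * is exactly 1MiB, so the clamp above does not trigger; batch /= 4 gives
 * 64, and rounddown_pow_of_two(64 + 32) - 1 = 63.  pageset_set_batch()
 * below then programs pcp->batch = 63 and pcp->high = 6 * 63 = 378.
 */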
5607
/*
 * pcp->high and pcp->batch values are related and dependent on one another:
 * ->batch must never be higher than ->high.
 * The following function updates them in a safe manner without read side
 * locking.
 *
 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 * those fields changing asynchronously (according to the above rule).
 *
 * Callers must hold pcp_batch_high_lock; pageset_update() itself does no
 * locking or atomic operations.
 */
5621static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5622 unsigned long batch)
5623{
	/* start with a fail safe value for batch */
5625 pcp->batch = 1;
5626 smp_wmb();

	/* Update high, then batch, in order */
5629 pcp->high = high;
5630 smp_wmb();
5631
5632 pcp->batch = batch;
5633}
5634
/* a companion to pageset_set_high() */
5636static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5637{
5638 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5639}
5640
5641static void pageset_init(struct per_cpu_pageset *p)
5642{
5643 struct per_cpu_pages *pcp;
5644 int migratetype;
5645
5646 memset(p, 0, sizeof(*p));
5647
5648 pcp = &p->pcp;
5649 pcp->count = 0;
5650 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5651 INIT_LIST_HEAD(&pcp->lists[migratetype]);
5652}
5653
5654static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5655{
5656 pageset_init(p);
5657 pageset_set_batch(p, batch);
5658}
5659
/*
 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
5664static void pageset_set_high(struct per_cpu_pageset *p,
5665 unsigned long high)
5666{
5667 unsigned long batch = max(1UL, high / 4);
5668 if ((high / 4) > (PAGE_SHIFT * 8))
5669 batch = PAGE_SHIFT * 8;
5670
5671 pageset_update(&p->pcp, high, batch);
5672}
5673
5674static void pageset_set_high_and_batch(struct zone *zone,
5675 struct per_cpu_pageset *pcp)
5676{
5677 if (percpu_pagelist_fraction)
5678 pageset_set_high(pcp,
5679 (zone->managed_pages /
5680 percpu_pagelist_fraction));
5681 else
5682 pageset_set_batch(pcp, zone_batchsize(zone));
5683}
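
/*
 * Worked example (illustrative, assuming 4KiB pages): with
 * percpu_pagelist_fraction = 8 and a zone managing 262144 pages,
 * pageset_set_high() gets high = 32768; high / 4 = 8192 exceeds
 * PAGE_SHIFT * 8 = 96, so the batch is capped at 96.
 */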
5684
5685static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5686{
5687 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5688
5689 pageset_init(pcp);
5690 pageset_set_high_and_batch(zone, pcp);
5691}
5692
5693void __meminit setup_zone_pageset(struct zone *zone)
5694{
5695 int cpu;
5696 zone->pageset = alloc_percpu(struct per_cpu_pageset);
5697 for_each_possible_cpu(cpu)
5698 zone_pageset_init(zone, cpu);
5699}
5700
/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
5705void __init setup_per_cpu_pageset(void)
5706{
5707 struct pglist_data *pgdat;
5708 struct zone *zone;
5709
5710 for_each_populated_zone(zone)
5711 setup_zone_pageset(zone);
5712
5713 for_each_online_pgdat(pgdat)
5714 pgdat->per_cpu_nodestats =
5715 alloc_percpu(struct per_cpu_nodestat);
5716}
5717
5718static __meminit void zone_pcp_init(struct zone *zone)
5719{
	/*
	 * per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
5725 zone->pageset = &boot_pageset;
5726
5727 if (populated_zone(zone))
5728 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5729 zone->name, zone->present_pages,
5730 zone_batchsize(zone));
5731}
5732
5733void __meminit init_currently_empty_zone(struct zone *zone,
5734 unsigned long zone_start_pfn,
5735 unsigned long size)
5736{
5737 struct pglist_data *pgdat = zone->zone_pgdat;
5738
5739 pgdat->nr_zones = zone_idx(zone) + 1;
5740
5741 zone->zone_start_pfn = zone_start_pfn;
5742
5743 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5744 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5745 pgdat->node_id,
5746 (unsigned long)zone_idx(zone),
5747 zone_start_pfn, (zone_start_pfn + size));
5748
5749 zone_init_free_lists(zone);
5750 zone->initialized = 1;
5751}
5752
5753#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5754#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
5759int __meminit __early_pfn_to_nid(unsigned long pfn,
5760 struct mminit_pfnnid_cache *state)
5761{
5762 unsigned long start_pfn, end_pfn;
5763 int nid;
5764
5765 if (state->last_start <= pfn && pfn < state->last_end)
5766 return state->last_nid;
5767
5768 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5769 if (nid != -1) {
5770 state->last_start = start_pfn;
5771 state->last_end = end_pfn;
5772 state->last_nid = nid;
5773 }
5774
5775 return nid;
5776}
5777#endif
5778
/**
 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memblock_free_early_nid() manually.
 */
5788void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
5789{
5790 unsigned long start_pfn, end_pfn;
5791 int i, this_nid;
5792
5793 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5794 start_pfn = min(start_pfn, max_low_pfn);
5795 end_pfn = min(end_pfn, max_low_pfn);
5796
5797 if (start_pfn < end_pfn)
5798 memblock_free_early_nid(PFN_PHYS(start_pfn),
5799 (end_pfn - start_pfn) << PAGE_SHIFT,
5800 this_nid);
5801 }
5802}
5803
5804
/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memory_present() manually.
 */
5811void __init sparse_memory_present_with_active_regions(int nid)
5812{
5813 unsigned long start_pfn, end_pfn;
5814 int i, this_nid;
5815
5816 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5817 memory_present(this_nid, start_pfn, end_pfn);
5818}
5819
5820
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frames of a node based on information
 * provided by memblock_set_node(). If called for a node with no available
 * memory, the start and end PFNs will be 0.
 */
5831void __meminit get_pfn_range_for_nid(unsigned int nid,
5832 unsigned long *start_pfn, unsigned long *end_pfn)
5833{
5834 unsigned long this_start_pfn, this_end_pfn;
5835 int i;
5836
5837 *start_pfn = -1UL;
5838 *end_pfn = 0;
5839
5840 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5841 *start_pfn = min(*start_pfn, this_start_pfn);
5842 *end_pfn = max(*end_pfn, this_end_pfn);
5843 }
5844
5845 if (*start_pfn == -1UL)
5846 *start_pfn = 0;
5847}
5848
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
5854static void __init find_usable_zone_for_movable(void)
5855{
5856 int zone_index;
5857 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5858 if (zone_index == ZONE_MOVABLE)
5859 continue;
5860
5861 if (arch_zone_highest_possible_pfn[zone_index] >
5862 arch_zone_lowest_possible_pfn[zone_index])
5863 break;
5864 }
5865
5866 VM_BUG_ON(zone_index == -1);
5867 movable_zone = zone_index;
5868}
5869
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
5880static void __meminit adjust_zone_range_for_zone_movable(int nid,
5881 unsigned long zone_type,
5882 unsigned long node_start_pfn,
5883 unsigned long node_end_pfn,
5884 unsigned long *zone_start_pfn,
5885 unsigned long *zone_end_pfn)
5886{
	/* Only adjust if ZONE_MOVABLE is on this node */
5888 if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
5890 if (zone_type == ZONE_MOVABLE) {
5891 *zone_start_pfn = zone_movable_pfn[nid];
5892 *zone_end_pfn = min(node_end_pfn,
5893 arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
5896 } else if (!mirrored_kernelcore &&
5897 *zone_start_pfn < zone_movable_pfn[nid] &&
5898 *zone_end_pfn > zone_movable_pfn[nid]) {
5899 *zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
5902 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5903 *zone_start_pfn = *zone_end_pfn;
5904 }
5905}
5906
/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
5911static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5912 unsigned long zone_type,
5913 unsigned long node_start_pfn,
5914 unsigned long node_end_pfn,
5915 unsigned long *zone_start_pfn,
5916 unsigned long *zone_end_pfn,
5917 unsigned long *ignored)
5918{
	/* When hotadd a new node from cpu_up(), the node should be empty */
5920 if (!node_start_pfn && !node_end_pfn)
5921 return 0;
5922
	/* Get the start and end of the zone */
5924 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5925 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5926 adjust_zone_range_for_zone_movable(nid, zone_type,
5927 node_start_pfn, node_end_pfn,
5928 zone_start_pfn, zone_end_pfn);
5929

	/* Check that this node has pages within the zone's required range */
5931 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5932 return 0;
5933
	/* Move the zone boundaries inside the node if necessary */
5935 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5936 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5937
	/* Return the spanned pages */
5939 return *zone_end_pfn - *zone_start_pfn;
5940}
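
/*
 * Illustrative example (not from the original source): for a node spanning
 * PFNs [1024, 9216) and a zone whose architectural limits are [0, 4096),
 * with no ZONE_MOVABLE adjustment, the clamped span is [1024, 4096),
 * i.e. 3072 pages.
 */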
5941
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
5946unsigned long __meminit __absent_pages_in_range(int nid,
5947 unsigned long range_start_pfn,
5948 unsigned long range_end_pfn)
5949{
5950 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5951 unsigned long start_pfn, end_pfn;
5952 int i;
5953
5954 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5955 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5956 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5957 nr_absent -= end_pfn - start_pfn;
5958 }
5959 return nr_absent;
5960}
5961
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
5969unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5970 unsigned long end_pfn)
5971{
5972 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5973}
5974
/* Return the number of page frames in holes in a zone on a node */
5976static unsigned long __meminit zone_absent_pages_in_node(int nid,
5977 unsigned long zone_type,
5978 unsigned long node_start_pfn,
5979 unsigned long node_end_pfn,
5980 unsigned long *ignored)
5981{
5982 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5983 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5984 unsigned long zone_start_pfn, zone_end_pfn;
5985 unsigned long nr_absent;
5986
	/* When hotadd a new node from cpu_up(), the node should be empty */
5988 if (!node_start_pfn && !node_end_pfn)
5989 return 0;
5990
5991 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5992 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5993
5994 adjust_zone_range_for_zone_movable(nid, zone_type,
5995 node_start_pfn, node_end_pfn,
5996 &zone_start_pfn, &zone_end_pfn);
5997 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5998
	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
6004 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6005 unsigned long start_pfn, end_pfn;
6006 struct memblock_region *r;
6007
6008 for_each_memblock(memory, r) {
6009 start_pfn = clamp(memblock_region_memory_base_pfn(r),
6010 zone_start_pfn, zone_end_pfn);
6011 end_pfn = clamp(memblock_region_memory_end_pfn(r),
6012 zone_start_pfn, zone_end_pfn);
6013
6014 if (zone_type == ZONE_MOVABLE &&
6015 memblock_is_mirror(r))
6016 nr_absent += end_pfn - start_pfn;
6017
6018 if (zone_type == ZONE_NORMAL &&
6019 !memblock_is_mirror(r))
6020 nr_absent += end_pfn - start_pfn;
6021 }
6022 }
6023
6024 return nr_absent;
6025}
6026
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6028static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
6029 unsigned long zone_type,
6030 unsigned long node_start_pfn,
6031 unsigned long node_end_pfn,
6032 unsigned long *zone_start_pfn,
6033 unsigned long *zone_end_pfn,
6034 unsigned long *zones_size)
6035{
6036 unsigned int zone;
6037
6038 *zone_start_pfn = node_start_pfn;
6039 for (zone = 0; zone < zone_type; zone++)
6040 *zone_start_pfn += zones_size[zone];
6041
6042 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
6043
6044 return zones_size[zone_type];
6045}
6046
6047static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
6048 unsigned long zone_type,
6049 unsigned long node_start_pfn,
6050 unsigned long node_end_pfn,
6051 unsigned long *zholes_size)
6052{
6053 if (!zholes_size)
6054 return 0;
6055
6056 return zholes_size[zone_type];
6057}
6058
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6060
6061static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
6062 unsigned long node_start_pfn,
6063 unsigned long node_end_pfn,
6064 unsigned long *zones_size,
6065 unsigned long *zholes_size)
6066{
6067 unsigned long realtotalpages = 0, totalpages = 0;
6068 enum zone_type i;
6069
6070 for (i = 0; i < MAX_NR_ZONES; i++) {
6071 struct zone *zone = pgdat->node_zones + i;
6072 unsigned long zone_start_pfn, zone_end_pfn;
6073 unsigned long size, real_size;
6074
6075 size = zone_spanned_pages_in_node(pgdat->node_id, i,
6076 node_start_pfn,
6077 node_end_pfn,
6078 &zone_start_pfn,
6079 &zone_end_pfn,
6080 zones_size);
6081 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
6082 node_start_pfn, node_end_pfn,
6083 zholes_size);
6084 if (size)
6085 zone->zone_start_pfn = zone_start_pfn;
6086 else
6087 zone->zone_start_pfn = 0;
6088 zone->spanned_pages = size;
6089 zone->present_pages = real_size;
6090
6091 totalpages += size;
6092 realtotalpages += real_size;
6093 }
6094
6095 pgdat->node_spanned_pages = totalpages;
6096 pgdat->node_present_pages = realtotalpages;
6097 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6098 realtotalpages);
6099}
6100
6101#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
6109static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6110{
6111 unsigned long usemapsize;
6112
6113 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6114 usemapsize = roundup(zonesize, pageblock_nr_pages);
6115 usemapsize = usemapsize >> pageblock_order;
6116 usemapsize *= NR_PAGEBLOCK_BITS;
6117 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6118
6119 return usemapsize / 8;
6120}
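
/*
 * Worked example (illustrative): with pageblock_order = 9 (512-page
 * pageblocks), NR_PAGEBLOCK_BITS = 4 and 64-bit longs, an aligned
 * 262144-page zone has 512 pageblocks, needing 2048 bits = 256 bytes
 * of pageblock flags.
 */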
6121
6122static void __ref setup_usemap(struct pglist_data *pgdat,
6123 struct zone *zone,
6124 unsigned long zone_start_pfn,
6125 unsigned long zonesize)
6126{
6127 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6128 zone->pageblock_flags = NULL;
6129 if (usemapsize)
6130 zone->pageblock_flags =
6131 memblock_virt_alloc_node_nopanic(usemapsize,
6132 pgdat->node_id);
6133}
6134#else
6135static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6136 unsigned long zone_start_pfn, unsigned long zonesize) {}
6137#endif
6138
6139#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
6142void __init set_pageblock_order(void)
6143{
6144 unsigned int order;
6145
	/* Check that pageblock_nr_pages has not already been setup */
6147 if (pageblock_order)
6148 return;
6149
6150 if (HPAGE_SHIFT > PAGE_SHIFT)
6151 order = HUGETLB_PAGE_ORDER;
6152 else
6153 order = MAX_ORDER - 1;
6154
	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
6160 pageblock_order = order;
6161}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
6170void __init set_pageblock_order(void)
6171{
6172}
6173
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6175
6176static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6177 unsigned long present_pages)
6178{
6179 unsigned long pages = spanned_pages;
6180
	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
6189 if (spanned_pages > present_pages + (present_pages >> 4) &&
6190 IS_ENABLED(CONFIG_SPARSEMEM))
6191 pages = present_pages;
6192
6193 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6194}
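
/*
 * Worked example (illustrative, assuming 4KiB pages and a 64-byte
 * struct page): with spanned = 1048576 and present = 524288 under
 * SPARSEMEM, spanned exceeds present + present/16, so present_pages is
 * used and the memmap estimate is 524288 * 64 bytes = 8192 pages.
 */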
6195
6196#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6197static void pgdat_init_split_queue(struct pglist_data *pgdat)
6198{
6199 spin_lock_init(&pgdat->split_queue_lock);
6200 INIT_LIST_HEAD(&pgdat->split_queue);
6201 pgdat->split_queue_len = 0;
6202}
6203#else
6204static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6205#endif
6206
6207#ifdef CONFIG_COMPACTION
6208static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6209{
6210 init_waitqueue_head(&pgdat->kcompactd_wait);
6211}
6212#else
6213static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6214#endif
6215
6216static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
6217{
6218 pgdat_resize_init(pgdat);
6219
6220 pgdat_init_split_queue(pgdat);
6221 pgdat_init_kcompactd(pgdat);
6222
6223 init_waitqueue_head(&pgdat->kswapd_wait);
6224 init_waitqueue_head(&pgdat->pfmemalloc_wait);
6225
6226 pgdat_page_ext_init(pgdat);
6227 spin_lock_init(&pgdat->lru_lock);
6228 lruvec_init(node_lruvec(pgdat));
6229}
6230
6231static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6232 unsigned long remaining_pages)
6233{
6234 zone->managed_pages = remaining_pages;
6235 zone_set_nid(zone, nid);
6236 zone->name = zone_names[idx];
6237 zone->zone_pgdat = NODE_DATA(nid);
6238 spin_lock_init(&zone->lock);
6239 zone_seqlock_init(zone);
6240 zone_pcp_init(zone);
6241}
6242
/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
6250#ifdef CONFIG_MEMORY_HOTPLUG
6251void __ref free_area_init_core_hotplug(int nid)
6252{
6253 enum zone_type z;
6254 pg_data_t *pgdat = NODE_DATA(nid);
6255
6256 pgdat_init_internals(pgdat);
6257 for (z = 0; z < MAX_NR_ZONES; z++)
6258 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6259}
6260#endif
6261
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 * NOTE: this function is only called during early init.
 */
6271static void __init free_area_init_core(struct pglist_data *pgdat)
6272{
6273 enum zone_type j;
6274 int nid = pgdat->node_id;
6275
6276 pgdat_init_internals(pgdat);
6277 pgdat->per_cpu_nodestats = &boot_nodestats;
6278
6279 for (j = 0; j < MAX_NR_ZONES; j++) {
6280 struct zone *zone = pgdat->node_zones + j;
6281 unsigned long size, freesize, memmap_pages;
6282 unsigned long zone_start_pfn = zone->zone_start_pfn;
6283
6284 size = zone->spanned_pages;
6285 freesize = zone->present_pages;
6286
		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
6292 memmap_pages = calc_memmap_size(size, freesize);
6293 if (!is_highmem_idx(j)) {
6294 if (freesize >= memmap_pages) {
6295 freesize -= memmap_pages;
6296 if (memmap_pages)
6297 printk(KERN_DEBUG
6298 " %s zone: %lu pages used for memmap\n",
6299 zone_names[j], memmap_pages);
6300 } else
6301 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
6302 zone_names[j], memmap_pages, freesize);
6303 }
6304
		/* Account for reserved pages */
6306 if (j == 0 && freesize > dma_reserve) {
6307 freesize -= dma_reserve;
6308 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6309 zone_names[0], dma_reserve);
6310 }
6311
6312 if (!is_highmem_idx(j))
6313 nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
6315 else if (nr_kernel_pages > memmap_pages * 2)
6316 nr_kernel_pages -= memmap_pages;
6317 nr_all_pages += freesize;
6318
		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
6324 zone_init_internals(zone, j, nid, freesize);
6325
6326 if (!size)
6327 continue;
6328
6329 set_pageblock_order();
6330 setup_usemap(pgdat, zone, zone_start_pfn, size);
6331 init_currently_empty_zone(zone, zone_start_pfn, size);
6332 memmap_init(size, nid, j, zone_start_pfn);
6333 }
6334}
6335
6336#ifdef CONFIG_FLAT_NODE_MEM_MAP
6337static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6338{
6339 unsigned long __maybe_unused start = 0;
6340 unsigned long __maybe_unused offset = 0;
6341
	/* Skip empty nodes */
6343 if (!pgdat->node_spanned_pages)
6344 return;
6345
6346 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6347 offset = pgdat->node_start_pfn - start;
6348
6349 if (!pgdat->node_mem_map) {
6350 unsigned long size, end;
6351 struct page *map;
6352
		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
6358 end = pgdat_end_pfn(pgdat);
6359 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6360 size = (end - start) * sizeof(struct page);
6361 map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
6362 pgdat->node_mem_map = map + offset;
6363 }
6364 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
6365 __func__, pgdat->node_id, (unsigned long)pgdat,
6366 (unsigned long)pgdat->node_mem_map);
6367#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
6371 if (pgdat == NODE_DATA(0)) {
6372 mem_map = NODE_DATA(0)->node_mem_map;
6373#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
6374 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6375 mem_map -= offset;
6376#endif
6377 }
6378#endif
6379}
6380#else
6381static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
6382#endif
6383
6384#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6385static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6386{
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
6391 pgdat->static_init_pgcnt = min_t(unsigned long, PAGES_PER_SECTION,
6392 pgdat->node_spanned_pages);
6393 pgdat->first_deferred_pfn = ULONG_MAX;
6394}
6395#else
6396static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
6397#endif
6398
6399void __init free_area_init_node(int nid, unsigned long *zones_size,
6400 unsigned long node_start_pfn,
6401 unsigned long *zholes_size)
6402{
6403 pg_data_t *pgdat = NODE_DATA(nid);
6404 unsigned long start_pfn = 0;
6405 unsigned long end_pfn = 0;
6406
	/* pg_data_t should be reset to zero when it's allocated */
6408 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6409
6410 pgdat->node_id = nid;
6411 pgdat->node_start_pfn = node_start_pfn;
6412 pgdat->per_cpu_nodestats = NULL;
6413#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6414 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6415 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6416 (u64)start_pfn << PAGE_SHIFT,
6417 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6418#else
6419 start_pfn = node_start_pfn;
6420#endif
6421 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6422 zones_size, zholes_size);
6423
6424 alloc_node_mem_map(pgdat);
6425 pgdat_set_deferred_range(pgdat);
6426
6427 free_area_init_core(pgdat);
6428}
6429
6430#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
 * Only struct pages that are backed by physical memory are zeroed and
 * initialized by going through __init_single_page(). But, there are some
 * struct pages which are reserved in memblock allocator and their fields
 * may be accessed (for example page_to_pfn() on some configuration accesses
 * flags). We must explicitly zero those struct pages.
 */
6438void __init zero_resv_unavail(void)
6439{
6440 phys_addr_t start, end;
6441 unsigned long pfn;
6442 u64 i, pgcnt;
6443
	/*
	 * Loop through ranges that are reserved, but do not have reported
	 * physical memory backing.
	 */
6448 pgcnt = 0;
6449 for_each_resv_unavail_range(i, &start, &end) {
6450 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
6451 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6452 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6453 + pageblock_nr_pages - 1;
6454 continue;
6455 }
6456 mm_zero_struct_page(pfn_to_page(pfn));
6457 pgcnt++;
6458 }
6459 }
6460
	/*
	 * Struct pages that do not have backing memory. This could be because
	 * firmware is using some of this memory, or for some other reason.
	 */
6468 if (pgcnt)
		pr_info("Reserved but unavailable: %llu pages\n", pgcnt);
6470}
6471#endif
6472
6473#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6474
6475#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
6479void __init setup_nr_node_ids(void)
6480{
6481 unsigned int highest;
6482
6483 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
6484 nr_node_ids = highest + 1;
6485}
6486#endif
6487
/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
 * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Returns the determined alignment in pfn's.  0 if there is no alignment
 * requirement (single node).
 */
6507unsigned long __init node_map_pfn_alignment(void)
6508{
6509 unsigned long accl_mask = 0, last_end = 0;
6510 unsigned long start, end, mask;
6511 int last_nid = -1;
6512 int i, nid;
6513
6514 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
6515 if (!start || last_nid < 0 || last_nid == nid) {
6516 last_nid = nid;
6517 last_end = end;
6518 continue;
6519 }

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
6526 mask = ~((1 << __ffs(start)) - 1);
6527 while (mask && last_end <= (start & (mask << 1)))
6528 mask <<= 1;
6529

		/* accumulate all internode masks */
6531 accl_mask |= mask;
6532 }
6533
	/* convert mask to number of pages */
6535 return ~accl_mask + 1;
6536}
6537
/* Find the lowest pfn for a node */
6539static unsigned long __init find_min_pfn_for_node(int nid)
6540{
6541 unsigned long min_pfn = ULONG_MAX;
6542 unsigned long start_pfn;
6543 int i;
6544
6545 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6546 min_pfn = min(min_pfn, start_pfn);
6547
6548 if (min_pfn == ULONG_MAX) {
6549 pr_warn("Could not find start_pfn for node %d\n", nid);
6550 return 0;
6551 }
6552
6553 return min_pfn;
6554}
6555
/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * memblock_set_node().
 */
6562unsigned long __init find_min_pfn_with_active_regions(void)
6563{
6564 return find_min_pfn_for_node(MAX_NUMNODES);
6565}
6566
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
6572static unsigned long __init early_calculate_totalpages(void)
6573{
6574 unsigned long totalpages = 0;
6575 unsigned long start_pfn, end_pfn;
6576 int i, nid;
6577
6578 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6579 unsigned long pages = end_pfn - start_pfn;
6580
6581 totalpages += pages;
6582 if (pages)
6583 node_set_state(nid, N_MEMORY);
6584 }
6585 return totalpages;
6586}
6587
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
6594static void __init find_zone_movable_pfns_for_nodes(void)
6595{
6596 int i, nid;
6597 unsigned long usable_startpfn;
6598 unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
6600 nodemask_t saved_node_state = node_states[N_MEMORY];
6601 unsigned long totalpages = early_calculate_totalpages();
6602 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
6603 struct memblock_region *r;
6604
	/* Need to find movable_zone earlier when movable_node is specified. */
6606 find_usable_zone_for_movable();
6607
	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
6612 if (movable_node_is_enabled()) {
6613 for_each_memblock(memory, r) {
6614 if (!memblock_is_hotpluggable(r))
6615 continue;
6616
6617 nid = r->nid;
6618
6619 usable_startpfn = PFN_DOWN(r->base);
6620 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6621 min(usable_startpfn, zone_movable_pfn[nid]) :
6622 usable_startpfn;
6623 }
6624
6625 goto out2;
6626 }
6627
	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
6631 if (mirrored_kernelcore) {
6632 bool mem_below_4gb_not_mirrored = false;
6633
6634 for_each_memblock(memory, r) {
6635 if (memblock_is_mirror(r))
6636 continue;
6637
6638 nid = r->nid;
6639
6640 usable_startpfn = memblock_region_memory_base_pfn(r);
6641
6642 if (usable_startpfn < 0x100000) {
6643 mem_below_4gb_not_mirrored = true;
6644 continue;
6645 }
6646
6647 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6648 min(usable_startpfn, zone_movable_pfn[nid]) :
6649 usable_startpfn;
6650 }
6651
6652 if (mem_below_4gb_not_mirrored)
6653 pr_warn("This configuration results in unmirrored kernel memory.");
6654
6655 goto out2;
6656 }
6657
	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
6662 if (required_kernelcore_percent)
6663 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
6664 10000UL;
6665 if (required_movablecore_percent)
6666 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
6667 10000UL;
6668
	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
6677 if (required_movablecore) {
6678 unsigned long corepages;
6679
		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
6684 required_movablecore =
6685 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
6686 required_movablecore = min(totalpages, required_movablecore);
6687 corepages = totalpages - required_movablecore;
6688
6689 required_kernelcore = max(required_kernelcore, corepages);
6690 }
6691
	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
6696 if (!required_kernelcore || required_kernelcore >= totalpages)
6697 goto out;
6698
	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
6700 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6701
6702restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
6704 kernelcore_node = required_kernelcore / usable_nodes;
6705 for_each_node_state(nid, N_MEMORY) {
6706 unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
6713 if (required_kernelcore < kernelcore_node)
6714 kernelcore_node = required_kernelcore / usable_nodes;
6715
		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
6721 kernelcore_remaining = kernelcore_node;
6722
		/* Go through each range of PFNs within this node */
6724 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6725 unsigned long size_pages;
6726
6727 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
6728 if (start_pfn >= end_pfn)
6729 continue;
6730
			/* Account for what is only usable for kernelcore */
6732 if (start_pfn < usable_startpfn) {
6733 unsigned long kernel_pages;
6734 kernel_pages = min(end_pfn, usable_startpfn)
6735 - start_pfn;
6736
6737 kernelcore_remaining -= min(kernel_pages,
6738 kernelcore_remaining);
6739 required_kernelcore -= min(kernel_pages,
6740 required_kernelcore);
6741
				/* Continue if range is now fully accounted */
6743 if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
6751 zone_movable_pfn[nid] = end_pfn;
6752 continue;
6753 }
6754 start_pfn = usable_startpfn;
6755 }
6756
			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
6762 size_pages = end_pfn - start_pfn;
6763 if (size_pages > kernelcore_remaining)
6764 size_pages = kernelcore_remaining;
6765 zone_movable_pfn[nid] = start_pfn + size_pages;
6766
			/*
			 * Some kernelcore has been met, update counters and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
6772 required_kernelcore -= min(required_kernelcore,
6773 size_pages);
6774 kernelcore_remaining -= size_pages;
6775 if (!kernelcore_remaining)
6776 break;
6777 }
6778 }
6779
	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
6786 usable_nodes--;
6787 if (usable_nodes && required_kernelcore > usable_nodes)
6788 goto restart;
6789
6790out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6792 for (nid = 0; nid < MAX_NUMNODES; nid++)
6793 zone_movable_pfn[nid] =
6794 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
6795
6796out:
	/* restore the node_state */
6798 node_states[N_MEMORY] = saved_node_state;
6799}
6800
/* Any regular or high memory on that node ? */
6802static void check_for_memory(pg_data_t *pgdat, int nid)
6803{
6804 enum zone_type zone_type;
6805
6806 if (N_MEMORY == N_NORMAL_MEMORY)
6807 return;
6808
6809 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
6810 struct zone *zone = &pgdat->node_zones[zone_type];
6811 if (populated_zone(zone)) {
6812 node_set_state(nid, N_HIGH_MEMORY);
6813 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6814 zone_type <= ZONE_NORMAL)
6815 node_set_state(nid, N_NORMAL_MEMORY);
6816 break;
6817 }
6818 }
6819}
6820
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
6834void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6835{
6836 unsigned long start_pfn, end_pfn;
6837 int i, nid;
6838
	/* Record where the zone boundaries are */
6840 memset(arch_zone_lowest_possible_pfn, 0,
6841 sizeof(arch_zone_lowest_possible_pfn));
6842 memset(arch_zone_highest_possible_pfn, 0,
6843 sizeof(arch_zone_highest_possible_pfn));
6844
6845 start_pfn = find_min_pfn_with_active_regions();
6846
6847 for (i = 0; i < MAX_NR_ZONES; i++) {
6848 if (i == ZONE_MOVABLE)
6849 continue;
6850
6851 end_pfn = max(max_zone_pfn[i], start_pfn);
6852 arch_zone_lowest_possible_pfn[i] = start_pfn;
6853 arch_zone_highest_possible_pfn[i] = end_pfn;
6854
6855 start_pfn = end_pfn;
6856 }
6857
	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
6859 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
6860 find_zone_movable_pfns_for_nodes();
6861
	/* Print out the zone ranges */
6863 pr_info("Zone ranges:\n");
6864 for (i = 0; i < MAX_NR_ZONES; i++) {
6865 if (i == ZONE_MOVABLE)
6866 continue;
6867 pr_info(" %-8s ", zone_names[i]);
6868 if (arch_zone_lowest_possible_pfn[i] ==
6869 arch_zone_highest_possible_pfn[i])
6870 pr_cont("empty\n");
6871 else
6872 pr_cont("[mem %#018Lx-%#018Lx]\n",
6873 (u64)arch_zone_lowest_possible_pfn[i]
6874 << PAGE_SHIFT,
6875 ((u64)arch_zone_highest_possible_pfn[i]
6876 << PAGE_SHIFT) - 1);
6877 }
6878
	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
6880 pr_info("Movable zone start for each node\n");
6881 for (i = 0; i < MAX_NUMNODES; i++) {
6882 if (zone_movable_pfn[i])
6883 pr_info(" Node %d: %#018Lx\n", i,
6884 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
6885 }
6886
	/* Print out the early node map */
6888 pr_info("Early memory node ranges\n");
6889 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
6890 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6891 (u64)start_pfn << PAGE_SHIFT,
6892 ((u64)end_pfn << PAGE_SHIFT) - 1);
6893
	/* Initialise every node */
6895 mminit_verify_pageflags_layout();
6896 setup_nr_node_ids();
6897 zero_resv_unavail();
6898 for_each_online_node(nid) {
6899 pg_data_t *pgdat = NODE_DATA(nid);
6900 free_area_init_node(nid, NULL,
6901 find_min_pfn_for_node(nid), NULL);
6902
		/* Any memory on that node */
6904 if (pgdat->node_present_pages)
6905 node_set_state(nid, N_MEMORY);
6906 check_for_memory(pgdat, nid);
6907 }
6908}
6909
6910static int __init cmdline_parse_core(char *p, unsigned long *core,
6911 unsigned long *percent)
6912{
6913 unsigned long long coremem;
6914 char *endptr;
6915
6916 if (!p)
6917 return -EINVAL;
6918
	/* Value may be a percentage of total memory, otherwise bytes */
6920 coremem = simple_strtoull(p, &endptr, 0);
6921 if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
6923 WARN_ON(coremem > 100);
6924
6925 *percent = coremem;
6926 } else {
6927 coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
6929 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6930
6931 *core = coremem >> PAGE_SHIFT;
6932 *percent = 0UL;
6933 }
6934 return 0;
6935}
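
/*
 * Illustrative usage (not from the original source): "kernelcore=512M"
 * takes the memparse() branch, so with 4KiB pages *core becomes 131072
 * and *percent is cleared; "kernelcore=30%" takes the percent branch
 * and simply stores 30 in *percent.
 */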
6936
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
6941static int __init cmdline_parse_kernelcore(char *p)
6942{
	/* parse kernelcore=mirror */
6944 if (parse_option_str(p, "mirror")) {
6945 mirrored_kernelcore = true;
6946 return 0;
6947 }
6948
6949 return cmdline_parse_core(p, &required_kernelcore,
6950 &required_kernelcore_percent);
6951}
6952
/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
6957static int __init cmdline_parse_movablecore(char *p)
6958{
6959 return cmdline_parse_core(p, &required_movablecore,
6960 &required_movablecore_percent);
6961}
6962
6963early_param("kernelcore", cmdline_parse_kernelcore);
6964early_param("movablecore", cmdline_parse_movablecore);
6965
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6967
6968void adjust_managed_page_count(struct page *page, long count)
6969{
6970 spin_lock(&managed_page_count_lock);
6971 page_zone(page)->managed_pages += count;
6972 totalram_pages += count;
6973#ifdef CONFIG_HIGHMEM
6974 if (PageHighMem(page))
6975 totalhigh_pages += count;
6976#endif
6977 spin_unlock(&managed_page_count_lock);
6978}
6979EXPORT_SYMBOL(adjust_managed_page_count);
6980
6981unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
6982{
6983 void *pos;
6984 unsigned long pages = 0;
6985
6986 start = (void *)PAGE_ALIGN((unsigned long)start);
6987 end = (void *)((unsigned long)end & PAGE_MASK);
6988 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6989 struct page *page = virt_to_page(pos);
6990 void *direct_map_addr;
6991
		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * work with aliases.  Getting the direct map
		 * address ensures that we get a _writeable_
		 * alias for the memset().
		 */
6999 direct_map_addr = page_address(page);
7000 if ((unsigned int)poison <= 0xFF)
7001 memset(direct_map_addr, poison, PAGE_SIZE);
7002
7003 free_reserved_page(page);
7004 }
7005
7006 if (pages && s)
7007 pr_info("Freeing %s memory: %ldK\n",
7008 s, pages << (PAGE_SHIFT - 10));
7009
7010 return pages;
7011}
7012EXPORT_SYMBOL(free_reserved_area);
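
/*
 * Typical usage: free_initmem_default() passes the init section bounds
 * here, e.g. free_reserved_area(&__init_begin, &__init_end, poison,
 * "unused kernel"), returning init memory to the buddy allocator once
 * boot is complete.
 */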
7013
7014#ifdef CONFIG_HIGHMEM
7015void free_highmem_page(struct page *page)
7016{
7017 __free_reserved_page(page);
7018 totalram_pages++;
7019 page_zone(page)->managed_pages++;
7020 totalhigh_pages++;
7021}
7022#endif
7023
7024
7025void __init mem_init_print_info(const char *str)
7026{
7027 unsigned long physpages, codesize, datasize, rosize, bss_size;
7028 unsigned long init_code_size, init_data_size;
7029
7030 physpages = get_num_physpages();
7031 codesize = _etext - _stext;
7032 datasize = _edata - _sdata;
7033 rosize = __end_rodata - __start_rodata;
7034 bss_size = __bss_stop - __bss_start;
7035 init_data_size = __init_end - __init_begin;
7036 init_code_size = _einittext - _sinittext;
7037
	/*
	 * Detect special cases and adjust section sizes applicable to
	 * init sections:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
7045#define adj_init_size(start, end, size, pos, adj) \
7046 do { \
7047 if (start <= pos && pos < end && size > adj) \
7048 size -= adj; \
7049 } while (0)
7050
7051 adj_init_size(__init_begin, __init_end, init_data_size,
7052 _sinittext, init_code_size);
7053 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7054 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7055 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7056 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7057
7058#undef adj_init_size
7059
7060 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7061#ifdef CONFIG_HIGHMEM
7062 ", %luK highmem"
7063#endif
7064 "%s%s)\n",
7065 nr_free_pages() << (PAGE_SHIFT - 10),
7066 physpages << (PAGE_SHIFT - 10),
7067 codesize >> 10, datasize >> 10, rosize >> 10,
7068 (init_data_size + init_code_size) >> 10, bss_size >> 10,
7069 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
7070 totalcma_pages << (PAGE_SHIFT - 10),
7071#ifdef CONFIG_HIGHMEM
7072 totalhigh_pages << (PAGE_SHIFT - 10),
7073#endif
7074 str ? ", " : "", str ? str : "");
7075}
7076
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
7088void __init set_dma_reserve(unsigned long new_dma_reserve)
7089{
7090 dma_reserve = new_dma_reserve;
7091}
7092
7093void __init free_area_init(unsigned long *zones_size)
7094{
7095 zero_resv_unavail();
7096 free_area_init_node(0, zones_size,
7097 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
7098}
7099
7100static int page_alloc_cpu_dead(unsigned int cpu)
7101{
7102
7103 lru_add_drain_cpu(cpu);
7104 drain_pages(cpu);
7105
	/*
	 * Spill the event counters of the dead processor
	 * into the current processor's event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
7112 vm_events_fold_cpu(cpu);
7113
	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
7121 cpu_vm_stats_fold(cpu);
7122 return 0;
7123}
7124
7125void __init page_alloc_init(void)
7126{
7127 int ret;
7128
7129 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7130 "mm/page_alloc:dead", NULL,
7131 page_alloc_cpu_dead);
7132 WARN_ON(ret < 0);
7133}
7134
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
7139static void calculate_totalreserve_pages(void)
7140{
7141 struct pglist_data *pgdat;
7142 unsigned long reserve_pages = 0;
7143 enum zone_type i, j;
7144
7145 for_each_online_pgdat(pgdat) {
7146
7147 pgdat->totalreserve_pages = 0;
7148
7149 for (i = 0; i < MAX_NR_ZONES; i++) {
7150 struct zone *zone = pgdat->node_zones + i;
7151 long max = 0;
7152
			/* Find valid and maximum lowmem_reserve in the zone */
7154 for (j = i; j < MAX_NR_ZONES; j++) {
7155 if (zone->lowmem_reserve[j] > max)
7156 max = zone->lowmem_reserve[j];
7157 }

			/* we treat the high watermark as reserved pages. */
7160 max += high_wmark_pages(zone);
7161
7162 if (max > zone->managed_pages)
7163 max = zone->managed_pages;
7164
7165 pgdat->totalreserve_pages += max;
7166
7167 reserve_pages += max;
7168 }
7169 }
7170 totalreserve_pages = reserve_pages;
7171}
7172
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
7179static void setup_per_zone_lowmem_reserve(void)
7180{
7181 struct pglist_data *pgdat;
7182 enum zone_type j, idx;
7183
7184 for_each_online_pgdat(pgdat) {
7185 for (j = 0; j < MAX_NR_ZONES; j++) {
7186 struct zone *zone = pgdat->node_zones + j;
7187 unsigned long managed_pages = zone->managed_pages;
7188
7189 zone->lowmem_reserve[j] = 0;
7190
7191 idx = j;
7192 while (idx) {
7193 struct zone *lower_zone;
7194
7195 idx--;
7196 lower_zone = pgdat->node_zones + idx;
7197
7198 if (sysctl_lowmem_reserve_ratio[idx] < 1) {
7199 sysctl_lowmem_reserve_ratio[idx] = 0;
7200 lower_zone->lowmem_reserve[j] = 0;
7201 } else {
7202 lower_zone->lowmem_reserve[j] =
7203 managed_pages / sysctl_lowmem_reserve_ratio[idx];
7204 }
7205 managed_pages += lower_zone->managed_pages;
7206 }
7207 }
7208 }
7209
7210
7211 calculate_totalreserve_pages();
7212}
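
/*
 * Worked example (illustrative): with the default ratio of 256 for
 * ZONE_DMA32 and a ZONE_NORMAL managing 1048576 pages, the loop above
 * sets DMA32's lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096, i.e.
 * a GFP_KERNEL allocation falling back into DMA32 must leave roughly
 * 4096 extra pages free there.
 */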
7213
7214static void __setup_per_zone_wmarks(void)
7215{
7216 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7217 unsigned long lowmem_pages = 0;
7218 struct zone *zone;
7219 unsigned long flags;
7220
	/* Calculate total number of !ZONE_HIGHMEM pages */
7222 for_each_zone(zone) {
7223 if (!is_highmem(zone))
7224 lowmem_pages += zone->managed_pages;
7225 }
7226
7227 for_each_zone(zone) {
7228 u64 tmp;
7229
7230 spin_lock_irqsave(&zone->lock, flags);
7231 tmp = (u64)pages_min * zone->managed_pages;
7232 do_div(tmp, lowmem_pages);
7233 if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
7243 unsigned long min_pages;
7244
7245 min_pages = zone->managed_pages / 1024;
7246 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7247 zone->watermark[WMARK_MIN] = min_pages;
7248 } else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
7253 zone->watermark[WMARK_MIN] = tmp;
7254 }
7255
		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
7261 tmp = max_t(u64, tmp >> 2,
7262 mult_frac(zone->managed_pages,
7263 watermark_scale_factor, 10000));
7264
7265 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
7266 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7267
7268 spin_unlock_irqrestore(&zone->lock, flags);
7269 }
7270
	/* update totalreserve_pages */
7272 calculate_totalreserve_pages();
7273}
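
/*
 * Worked example (illustrative, assuming 4KiB pages and the default
 * watermark_scale_factor of 10): min_free_kbytes = 4096 gives
 * pages_min = 1024.  A single lowmem zone of 262144 managed pages then
 * gets WMARK_MIN = 1024, and tmp = max(1024 >> 2, 262144 * 10 / 10000)
 * = 262, so WMARK_LOW = 1286 and WMARK_HIGH = 1548.
 */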
7274
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
7282void setup_per_zone_wmarks(void)
7283{
7284 static DEFINE_SPINLOCK(lock);
7285
7286 spin_lock(&lock);
7287 __setup_per_zone_wmarks();
7288 spin_unlock(&lock);
7289}
7290
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
7315int __meminit init_per_zone_wmark_min(void)
7316{
7317 unsigned long lowmem_kbytes;
7318 int new_min_free_kbytes;
7319
7320 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7321 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7322
7323 if (new_min_free_kbytes > user_min_free_kbytes) {
7324 min_free_kbytes = new_min_free_kbytes;
7325 if (min_free_kbytes < 128)
7326 min_free_kbytes = 128;
7327 if (min_free_kbytes > 65536)
7328 min_free_kbytes = 65536;
7329 } else {
7330 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7331 new_min_free_kbytes, user_min_free_kbytes);
7332 }
7333 setup_per_zone_wmarks();
7334 refresh_zone_stat_thresholds();
7335 setup_per_zone_lowmem_reserve();
7336
7337#ifdef CONFIG_NUMA
7338 setup_min_unmapped_ratio();
7339 setup_min_slab_ratio();
7340#endif
7341
7342 return 0;
7343}
7344core_initcall(init_per_zone_wmark_min)
7345
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
7351int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7352 void __user *buffer, size_t *length, loff_t *ppos)
7353{
7354 int rc;
7355
7356 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7357 if (rc)
7358 return rc;
7359
7360 if (write) {
7361 user_min_free_kbytes = min_free_kbytes;
7362 setup_per_zone_wmarks();
7363 }
7364 return 0;
7365}
7366
7367int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7368 void __user *buffer, size_t *length, loff_t *ppos)
7369{
7370 int rc;
7371
7372 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7373 if (rc)
7374 return rc;
7375
7376 if (write)
7377 setup_per_zone_wmarks();
7378
7379 return 0;
7380}
7381
7382#ifdef CONFIG_NUMA
7383static void setup_min_unmapped_ratio(void)
7384{
7385 pg_data_t *pgdat;
7386 struct zone *zone;
7387
7388 for_each_online_pgdat(pgdat)
7389 pgdat->min_unmapped_pages = 0;
7390
7391 for_each_zone(zone)
7392 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
7393 sysctl_min_unmapped_ratio) / 100;
7394}
7395
7396
7397int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7398 void __user *buffer, size_t *length, loff_t *ppos)
7399{
7400 int rc;
7401
7402 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7403 if (rc)
7404 return rc;
7405
7406 setup_min_unmapped_ratio();
7407
7408 return 0;
7409}
7410
7411static void setup_min_slab_ratio(void)
7412{
7413 pg_data_t *pgdat;
7414 struct zone *zone;
7415
7416 for_each_online_pgdat(pgdat)
7417 pgdat->min_slab_pages = 0;
7418
7419 for_each_zone(zone)
7420 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
7421 sysctl_min_slab_ratio) / 100;
7422}
7423
7424int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7425 void __user *buffer, size_t *length, loff_t *ppos)
7426{
7427 int rc;
7428
7429 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7430 if (rc)
7431 return rc;
7432
7433 setup_min_slab_ratio();
7434
7435 return 0;
7436}
7437#endif
7438
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio can only make sense
 * when it is increased.
 */
7448int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7449 void __user *buffer, size_t *length, loff_t *ppos)
7450{
7451 proc_dointvec_minmax(table, write, buffer, length, ppos);
7452 setup_per_zone_lowmem_reserve();
7453 return 0;
7454}
7455
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to buddy allocator.
 */
7461int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
7462 void __user *buffer, size_t *length, loff_t *ppos)
7463{
7464 struct zone *zone;
7465 int old_percpu_pagelist_fraction;
7466 int ret;
7467
7468 mutex_lock(&pcp_batch_high_lock);
7469 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7470
7471 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7472 if (!write || ret < 0)
7473 goto out;
7474
	/* Sanity checking to avoid pcp imbalance */
7476 if (percpu_pagelist_fraction &&
7477 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7478 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7479 ret = -EINVAL;
7480 goto out;
7481 }
7482
	/* No change? */
7484 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7485 goto out;
7486
7487 for_each_populated_zone(zone) {
7488 unsigned int cpu;
7489
7490 for_each_possible_cpu(cpu)
7491 pageset_set_high_and_batch(zone,
7492 per_cpu_ptr(zone->pageset, cpu));
7493 }
7494out:
7495 mutex_unlock(&pcp_batch_high_lock);
7496 return ret;
7497}
7498
7499#ifdef CONFIG_NUMA
7500int hashdist = HASHDIST_DEFAULT;
7501
7502static int __init set_hashdist(char *str)
7503{
7504 if (!str)
7505 return 0;
7506 hashdist = simple_strtoul(str, &str, 0);
7507 return 1;
7508}
7509__setup("hashdist=", set_hashdist);
7510#endif
7511
7512#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
7517static unsigned long __init arch_reserved_kernel_pages(void)
7518{
7519 return 0;
7520}
7521#endif
7522
/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of hash
 * table only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this
 * scaling makes sense, it is disabled on such platforms.
 */
7532#if __BITS_PER_LONG > 32
7533#define ADAPT_SCALE_BASE (64ul << 30)
7534#define ADAPT_SCALE_SHIFT 2
7535#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7536#endif
7537
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
7544void *__init alloc_large_system_hash(const char *tablename,
7545 unsigned long bucketsize,
7546 unsigned long numentries,
7547 int scale,
7548 int flags,
7549 unsigned int *_hash_shift,
7550 unsigned int *_hash_mask,
7551 unsigned long low_limit,
7552 unsigned long high_limit)
7553{
7554 unsigned long long max = high_limit;
7555 unsigned long log2qty, size;
7556 void *table = NULL;
7557 gfp_t gfp_flags;
7558
	/* allow the kernel cmdline to have a say */
7560 if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
7562 numentries = nr_kernel_pages;
7563 numentries -= arch_reserved_kernel_pages();
7564
		/* It isn't necessary when PAGE_SIZE >= 1MB */
7566 if (PAGE_SHIFT < 20)
7567 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
7568
7569#if __BITS_PER_LONG > 32
7570 if (!high_limit) {
7571 unsigned long adapt;
7572
7573 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7574 adapt <<= ADAPT_SCALE_SHIFT)
7575 scale++;
7576 }
7577#endif
7578
		/* limit to 1 bucket per 2^scale bytes of low memory */
7580 if (scale > PAGE_SHIFT)
7581 numentries >>= (scale - PAGE_SHIFT);
7582 else
7583 numentries <<= (PAGE_SHIFT - scale);
7584
		/* Make sure we've got at least a 0-order allocation.. */
7586 if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
7588 WARN_ON(!(flags & HASH_EARLY));
7589 if (!(numentries >> *_hash_shift)) {
7590 numentries = 1UL << *_hash_shift;
7591 BUG_ON(!numentries);
7592 }
7593 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
7594 numentries = PAGE_SIZE / bucketsize;
7595 }
7596 numentries = roundup_pow_of_two(numentries);
7597
	/* limit allocation size to 1/16 total memory by default */
7599 if (max == 0) {
7600 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7601 do_div(max, bucketsize);
7602 }
7603 max = min(max, 0x80000000ULL);
7604
7605 if (numentries < low_limit)
7606 numentries = low_limit;
7607 if (numentries > max)
7608 numentries = max;
7609
7610 log2qty = ilog2(numentries);
7611
7612 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
7613 do {
7614 size = bucketsize << log2qty;
7615 if (flags & HASH_EARLY) {
7616 if (flags & HASH_ZERO)
7617 table = memblock_virt_alloc_nopanic(size, 0);
7618 else
7619 table = memblock_virt_alloc_raw(size, 0);
7620 } else if (hashdist) {
7621 table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
7622 } else {
7623
7624
7625
7626
7627
7628 if (get_order(size) < MAX_ORDER) {
7629 table = alloc_pages_exact(size, gfp_flags);
7630 kmemleak_alloc(table, size, 1, gfp_flags);
7631 }
7632 }
7633 } while (!table && size > PAGE_SIZE && --log2qty);
7634
7635 if (!table)
7636 panic("Failed to allocate %s hash table\n", tablename);
7637
7638 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7639 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
7640
7641 if (_hash_shift)
7642 *_hash_shift = log2qty;
7643 if (_hash_mask)
7644 *_hash_mask = (1 << log2qty) - 1;
7645
7646 return table;
7647}
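
/*
 * Worked example (illustrative): on a 64-bit box with 8GiB of lowmem
 * (nr_kernel_pages ~= 2^21 with 4KiB pages), numentries = 0, scale = 13
 * and an 8-byte bucket yield numentries >>= (13 - 12) = 2^20 entries,
 * i.e. an 8MiB table; the adaptive-scale bump does not kick in until
 * memory exceeds ADAPT_SCALE_BASE (64GiB).
 */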
7648
/*
 * This function checks whether the given pageblock includes unmovable
 * pages or not. If @count is not zero, it is okay to include fewer than
 * @count unmovable pages.
 *
 * PageLRU check without isolation or lru_lock could race so that
 * MemoryFailure could fail to isolate the page when it works as COW
 * mapping, but that's not a big deal; wait a while and retry.
 */
7658bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7659 int migratetype,
7660 bool skip_hwpoisoned_pages)
7661{
7662 unsigned long pfn, iter, found;
7663
	/*
	 * TODO: we could make this much more efficient by not checking every
	 * page in the range if we know all of them are in MOVABLE zone and
	 * that the movable zone guarantees that pages are migratable but
	 * the latter is not the case right now unfortunately. E.g. movablecore
	 * can still lead to having bootmem allocations in zone_movable.
	 */

	/*
	 * CMA allocations (alloc_contig_range) really need to mark isolate
	 * CMA pageblocks even when they are not movable in fact so consider
	 * them movable here.
	 */
7677 if (is_migrate_cma(migratetype) &&
7678 is_migrate_cma(get_pageblock_migratetype(page)))
7679 return false;
7680
7681 pfn = page_to_pfn(page);
7682 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7683 unsigned long check = pfn + iter;
7684
7685 if (!pfn_valid_within(check))
7686 continue;
7687
7688 page = pfn_to_page(check);
7689
7690 if (PageReserved(page))
7691 goto unmovable;
7692
		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
7698 if (PageHuge(page)) {
7699
7700 if (!hugepage_migration_supported(page_hstate(page)))
7701 goto unmovable;
7702
7703 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7704 continue;
7705 }
7706
		/*
		 * We can't use page_count without pinning a page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * properly.
		 */
7713 if (!page_ref_count(page)) {
7714 if (PageBuddy(page))
7715 iter += (1 << page_order(page)) - 1;
7716 continue;
7717 }
7718
		/*
		 * The HWPoisoned page may be not in buddy system, and
		 * page_count() is not 0.
		 */
7723 if (skip_hwpoisoned_pages && PageHWPoison(page))
7724 continue;
7725
7726 if (__PageMovable(page))
7727 continue;
7728
7729 if (!PageLRU(page))
7730 found++;
7731
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * We don't need more checks: this is a _used_ not-movable
		 * page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set to both a memory hole page and a _used_ kernel
		 * page at boot.
		 */
7744 if (found > count)
7745 goto unmovable;
7746 }
7747 return false;
7748unmovable:
7749 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
7750 return true;
7751}
7752
7753#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
7754
7755static unsigned long pfn_max_align_down(unsigned long pfn)
7756{
7757 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7758 pageblock_nr_pages) - 1);
7759}
7760
7761static unsigned long pfn_max_align_up(unsigned long pfn)
7762{
7763 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7764 pageblock_nr_pages));
7765}
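
/*
 * Illustrative example (not from the original source): with MAX_ORDER = 11
 * (MAX_ORDER_NR_PAGES = 1024) and 512-page pageblocks, the alignment is
 * 1024 pfns, so pfn_max_align_down(3000) == 2048 and
 * pfn_max_align_up(3000) == 3072.
 */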
7766
/* [start, end) must belong to a single zone. */
7768static int __alloc_contig_migrate_range(struct compact_control *cc,
7769 unsigned long start, unsigned long end)
7770{
	/* This function is based on compact_zone() from compaction.c. */
7772 unsigned long nr_reclaimed;
7773 unsigned long pfn = start;
7774 unsigned int tries = 0;
7775 int ret = 0;
7776
7777 migrate_prep();
7778
7779 while (pfn < end || !list_empty(&cc->migratepages)) {
7780 if (fatal_signal_pending(current)) {
7781 ret = -EINTR;
7782 break;
7783 }
7784
7785 if (list_empty(&cc->migratepages)) {
7786 cc->nr_migratepages = 0;
7787 pfn = isolate_migratepages_range(cc, pfn, end);
7788 if (!pfn) {
7789 ret = -EINTR;
7790 break;
7791 }
7792 tries = 0;
7793 } else if (++tries == 5) {
7794 ret = ret < 0 ? ret : -EBUSY;
7795 break;
7796 }
7797
7798 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7799 &cc->migratepages);
7800 cc->nr_migratepages -= nr_reclaimed;
7801
7802 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
7803 NULL, 0, cc->mode, MR_CONTIG_RANGE);
7804 }
7805 if (ret < 0) {
7806 putback_movable_pages(&cc->migratepages);
7807 return ret;
7808 }
7809 return 0;
7810}
7811
/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  The PFN range must belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Returns zero on success or negative error code.  On success all
 * pages which PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
7833int alloc_contig_range(unsigned long start, unsigned long end,
7834 unsigned migratetype, gfp_t gfp_mask)
7835{
7836 unsigned long outer_start, outer_end;
7837 unsigned int order;
7838 int ret = 0;
7839
7840 struct compact_control cc = {
7841 .nr_migratepages = 0,
7842 .order = -1,
7843 .zone = page_zone(pfn_to_page(start)),
7844 .mode = MIGRATE_SYNC,
7845 .ignore_skip_hint = true,
7846 .no_set_skip_hint = true,
7847 .gfp_mask = current_gfp_context(gfp_mask),
7848 };
7849 INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * Step 1: mark all pageblocks in the range MIGRATE_ISOLATE.
	 *
	 * Because pageblocks and MAX_ORDER blocks may have different sizes,
	 * the range is widened to the larger of the two alignments so that
	 * the page allocator cannot merge an isolated buddy with one from a
	 * neighbouring, non-isolated pageblock.
	 *
	 * Step 2 (below): migrate the pages out of the requested, unaligned
	 * range.  The freed pages go back to the allocator on the isolated
	 * free lists, where nobody else can grab them.
	 *
	 * Step 3: pull the whole aligned range out of the buddy system, then
	 * hand back the aligned head/tail the caller did not ask for and
	 * restore the original migratetype.
	 */
	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	/*
	 * On -EBUSY we fall through rather than failing immediately:
	 * test_pages_isolated() below will report which PFN is actually
	 * blocking the allocation.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * All pages in [start, end) are now free and sitting on
	 * MIGRATE_ISOLATE free lists, so they cannot be handed out to anybody
	 * else.  What remains is to take them out of the buddy allocator.
	 *
	 * The pages at the beginning and the end of the requested range may
	 * be merged into higher-order buddies that straddle the range
	 * boundaries, so grab the enclosing buddy-aligned range and free the
	 * unwanted head and tail afterwards.  zone->lock is not needed here
	 * because isolated pages never leave the free lists behind our back.
	 */
	lru_add_drain_all();
	drain_all_pages(cc.zone);

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = page_order(pfn_to_page(outer_start));

		/*
		 * outer_start may point at a small-order buddy that does not
		 * actually cover start; in that case fall back to start so
		 * that test_pages_isolated() reports the failing PFN
		 * correctly.
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
				    __func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab the isolated pages off the free lists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free the head and the tail of the aligned range, if any. */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
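
/*
 * Illustrative only: a minimal sketch (assuming the range is MIGRATE_MOVABLE,
 * lies within one zone, and that the hypothetical caller below simply gives
 * up on -EBUSY) of how alloc_contig_range() and free_contig_range() pair up.
 * Real users of this interface include CMA and gigantic-page allocation.
 */
#if 0
static int example_use_contig_range(unsigned long start_pfn,
				    unsigned long nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_MOVABLE, GFP_KERNEL);
	if (ret)	/* typically -EBUSY if the range cannot be emptied */
		return ret;

	/* The caller now owns pfn_to_page(start_pfn) .. +nr_pages - 1. */

	free_contig_range(start_pfn, nr_pages);
	return 0;
}
#endif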
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone's number of managed pages has changed (memory hotplug), so the
 * per-cpu pageset high watermarks and batch sizes are recalculated to match.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* Disable interrupts to avoid races with drain_pages(). */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must belong to a single zone and must already be
 * isolated before calling this: the pages are removed from the buddy free
 * lists and marked reserved so the memory can be offlined.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* Find the first valid pfn in the range. */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);

		/*
		 * A hardware-poisoned page may not be in the buddy system,
		 * and its page_count() need not be zero; mark it reserved
		 * and skip it.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		pr_info("remove from free list %lx %d %lx\n",
			pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

/*
 * Return true if @page lies inside a block that is currently free in the
 * buddy allocator (at any order).
 */
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Set PG_hwpoison on @page if it is confirmed to be a free buddy page.
 * The check is done under zone->lock so the page cannot be allocated while
 * it is being poisoned.  Returns true if the flag was newly set.
 */
bool set_hwpoison_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool hwpoisoned = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order) {
			if (!TestSetPageHWPoison(page))
				hwpoisoned = true;
			break;
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return hwpoisoned;
}
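
/*
 * Illustrative only: a minimal sketch of how a memory-failure style caller
 * could try to take a free page out of circulation.  The helper name and
 * error handling are hypothetical; the real handling lives in
 * mm/memory-failure.c.
 */
#if 0
static int example_poison_page_if_free(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/*
	 * set_hwpoison_free_buddy_page() checks PageBuddy under zone->lock,
	 * so it either poisons a genuinely free page or reports failure.
	 */
	if (!set_hwpoison_free_buddy_page(page))
		return -EBUSY;	/* page was in use or already poisoned */

	return 0;
}
#endif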
#endif
