/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ian Molton's ARM M4 code)
 */

17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/memblock.h>
25#include <linux/compiler.h>
26#include <linux/kernel.h>
27#include <linux/kmemcheck.h>
28#include <linux/kasan.h>
29#include <linux/module.h>
30#include <linux/suspend.h>
31#include <linux/pagevec.h>
32#include <linux/blkdev.h>
33#include <linux/slab.h>
34#include <linux/ratelimit.h>
35#include <linux/oom.h>
36#include <linux/notifier.h>
37#include <linux/topology.h>
38#include <linux/sysctl.h>
39#include <linux/cpu.h>
40#include <linux/cpuset.h>
41#include <linux/memory_hotplug.h>
42#include <linux/nodemask.h>
43#include <linux/vmalloc.h>
44#include <linux/vmstat.h>
45#include <linux/mempolicy.h>
46#include <linux/memremap.h>
47#include <linux/stop_machine.h>
48#include <linux/sort.h>
49#include <linux/pfn.h>
50#include <linux/backing-dev.h>
51#include <linux/fault-inject.h>
52#include <linux/page-isolation.h>
53#include <linux/page_ext.h>
54#include <linux/debugobjects.h>
55#include <linux/kmemleak.h>
56#include <linux/compaction.h>
57#include <trace/events/kmem.h>
58#include <linux/prefetch.h>
59#include <linux/mm_inline.h>
60#include <linux/migrate.h>
61#include <linux/page_ext.h>
62#include <linux/hugetlb.h>
63#include <linux/sched/rt.h>
64#include <linux/page_owner.h>
65#include <linux/kthread.h>
66#include <linux/memcontrol.h>
67
68#include <asm/sections.h>
69#include <asm/tlbflush.h>
70#include <asm/div64.h>
71#include "internal.h"
72
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
74static DEFINE_MUTEX(pcp_batch_high_lock);
75#define MIN_PERCPU_PAGELIST_FRACTION (8)
76
77#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
78DEFINE_PER_CPU(int, numa_node);
79EXPORT_PER_CPU_SYMBOL(numa_node);
80#endif
81
82#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
89DEFINE_PER_CPU(int, _numa_mem_);
90EXPORT_PER_CPU_SYMBOL(_numa_mem_);
91int _node_numa_mem_[MAX_NUMNODES];
92#endif
93
94#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
95volatile unsigned long latent_entropy __latent_entropy;
96EXPORT_SYMBOL(latent_entropy);
97#endif
98
/*
 * Array of node states.
 */
102nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
103 [N_POSSIBLE] = NODE_MASK_ALL,
104 [N_ONLINE] = { { [0] = 1UL } },
105#ifndef CONFIG_NUMA
106 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
107#ifdef CONFIG_HIGHMEM
108 [N_HIGH_MEMORY] = { { [0] = 1UL } },
109#endif
110#ifdef CONFIG_MOVABLE_NODE
111 [N_MEMORY] = { { [0] = 1UL } },
112#endif
113 [N_CPU] = { { [0] = 1UL } },
114#endif
115};
116EXPORT_SYMBOL(node_states);
117
118
119static DEFINE_SPINLOCK(managed_page_count_lock);
120
121unsigned long totalram_pages __read_mostly;
122unsigned long totalreserve_pages __read_mostly;
123unsigned long totalcma_pages __read_mostly;
124
125int percpu_pagelist_fraction;
126gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
127
/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index the page was taken off, for isolated pageblocks or compound pages
 * being freed. So use these helpers rather than reading page->index directly.
 */
136static inline int get_pcppage_migratetype(struct page *page)
137{
138 return page->index;
139}
140
141static inline void set_pcppage_migratetype(struct page *page, int migratetype)
142{
143 page->index = migratetype;
144}
145
146#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended.  To avoid races with
 * the suspend/hibernate code, they should always be called with pm_mutex
 * held (gfp_allowed_mask should also only be modified with pm_mutex held,
 * unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */
156static gfp_t saved_gfp_mask;
157
158void pm_restore_gfp_mask(void)
159{
160 WARN_ON(!mutex_is_locked(&pm_mutex));
161 if (saved_gfp_mask) {
162 gfp_allowed_mask = saved_gfp_mask;
163 saved_gfp_mask = 0;
164 }
165}
166
167void pm_restrict_gfp_mask(void)
168{
169 WARN_ON(!mutex_is_locked(&pm_mutex));
170 WARN_ON(saved_gfp_mask);
171 saved_gfp_mask = gfp_allowed_mask;
172 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
173}
174
175bool pm_suspended_storage(void)
176{
177 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
178 return false;
179 return true;
180}
181#endif
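
/*
 * Illustrative pairing for the helpers above (a sketch of the expected
 * caller, not code from this file): the hibernation core restricts the
 * allowed GFP flags before suspending block devices and restores them once
 * the devices are functional again, all while holding pm_mutex:
 *
 *	lock_system_sleep();
 *	pm_restrict_gfp_mask();
 *	...suspend devices, write or read the image...
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 */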
182
183#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
184unsigned int pageblock_order __read_mostly;
185#endif
186
187static void __free_pages_ok(struct page *page, unsigned int order);
188
/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
200int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
201#ifdef CONFIG_ZONE_DMA
202 256,
203#endif
204#ifdef CONFIG_ZONE_DMA32
205 256,
206#endif
207#ifdef CONFIG_HIGHMEM
208 32,
209#endif
210 32,
211};
212
213EXPORT_SYMBOL(totalram_pages);
214
215static char * const zone_names[MAX_NR_ZONES] = {
216#ifdef CONFIG_ZONE_DMA
217 "DMA",
218#endif
219#ifdef CONFIG_ZONE_DMA32
220 "DMA32",
221#endif
222 "Normal",
223#ifdef CONFIG_HIGHMEM
224 "HighMem",
225#endif
226 "Movable",
227#ifdef CONFIG_ZONE_DEVICE
228 "Device",
229#endif
230};
231
232char * const migratetype_names[MIGRATE_TYPES] = {
233 "Unmovable",
234 "Movable",
235 "Reclaimable",
236 "HighAtomic",
237#ifdef CONFIG_CMA
238 "CMA",
239#endif
240#ifdef CONFIG_MEMORY_ISOLATION
241 "Isolate",
242#endif
243};
244
245compound_page_dtor * const compound_page_dtors[] = {
246 NULL,
247 free_compound_page,
248#ifdef CONFIG_HUGETLB_PAGE
249 free_huge_page,
250#endif
251#ifdef CONFIG_TRANSPARENT_HUGEPAGE
252 free_transhuge_page,
253#endif
254};
255
256int min_free_kbytes = 1024;
257int user_min_free_kbytes = -1;
258int watermark_scale_factor = 10;
259
260static unsigned long __meminitdata nr_kernel_pages;
261static unsigned long __meminitdata nr_all_pages;
262static unsigned long __meminitdata dma_reserve;
263
264#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
265static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
266static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
267static unsigned long __initdata required_kernelcore;
268static unsigned long __initdata required_movablecore;
269static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
270static bool mirrored_kernelcore;
271
272
273int movable_zone;
274EXPORT_SYMBOL(movable_zone);
275#endif
276
277#if MAX_NUMNODES > 1
278int nr_node_ids __read_mostly = MAX_NUMNODES;
279int nr_online_nodes __read_mostly = 1;
280EXPORT_SYMBOL(nr_node_ids);
281EXPORT_SYMBOL(nr_online_nodes);
282#endif
283
284int page_group_by_mobility_disabled __read_mostly;
285
286#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
287static inline void reset_deferred_meminit(pg_data_t *pgdat)
288{
289 pgdat->first_deferred_pfn = ULONG_MAX;
290}
291
/* Returns true if the struct page for the pfn is uninitialised */
293static inline bool __meminit early_page_uninitialised(unsigned long pfn)
294{
295 int nid = early_pfn_to_nid(pfn);
296
297 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
298 return true;
299
300 return false;
301}
302
/*
 * Returns false when the remaining initialisation should be deferred until
 * after the page allocator is fully initialised.
 */
307static inline bool update_defer_init(pg_data_t *pgdat,
308 unsigned long pfn, unsigned long zone_end,
309 unsigned long *nr_initialised)
310{
311 unsigned long max_initialise;
312
313
314 if (zone_end < pgdat_end_pfn(pgdat))
315 return true;
316
317
318
319
320 max_initialise = max(2UL << (30 - PAGE_SHIFT),
321 (pgdat->node_spanned_pages >> 8));
322
323 (*nr_initialised)++;
324 if ((*nr_initialised > max_initialise) &&
325 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
326 pgdat->first_deferred_pfn = pfn;
327 return false;
328 }
329
330 return true;
331}
332#else
333static inline void reset_deferred_meminit(pg_data_t *pgdat)
334{
335}
336
337static inline bool early_page_uninitialised(unsigned long pfn)
338{
339 return false;
340}
341
342static inline bool update_defer_init(pg_data_t *pgdat,
343 unsigned long pfn, unsigned long zone_end,
344 unsigned long *nr_initialised)
345{
346 return true;
347}
348#endif
349
/* Return a pointer to the bitmap storing bits affecting a block of pages */
351static inline unsigned long *get_pageblock_bitmap(struct page *page,
352 unsigned long pfn)
353{
354#ifdef CONFIG_SPARSEMEM
355 return __pfn_to_section(pfn)->pageblock_flags;
356#else
357 return page_zone(page)->pageblock_flags;
358#endif
359}
360
361static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
362{
363#ifdef CONFIG_SPARSEMEM
364 pfn &= (PAGES_PER_SECTION-1);
365 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
366#else
367 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
368 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
369#endif
370}
371
/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
381static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
382 unsigned long pfn,
383 unsigned long end_bitidx,
384 unsigned long mask)
385{
386 unsigned long *bitmap;
387 unsigned long bitidx, word_bitidx;
388 unsigned long word;
389
390 bitmap = get_pageblock_bitmap(page, pfn);
391 bitidx = pfn_to_bitidx(page, pfn);
392 word_bitidx = bitidx / BITS_PER_LONG;
393 bitidx &= (BITS_PER_LONG-1);
394
395 word = bitmap[word_bitidx];
396 bitidx += end_bitidx;
397 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
398}
399
400unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
401 unsigned long end_bitidx,
402 unsigned long mask)
403{
404 return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
405}
406
407static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
408{
409 return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
410}
411
/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
420void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
421 unsigned long pfn,
422 unsigned long end_bitidx,
423 unsigned long mask)
424{
425 unsigned long *bitmap;
426 unsigned long bitidx, word_bitidx;
427 unsigned long old_word, word;
428
429 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
430
431 bitmap = get_pageblock_bitmap(page, pfn);
432 bitidx = pfn_to_bitidx(page, pfn);
433 word_bitidx = bitidx / BITS_PER_LONG;
434 bitidx &= (BITS_PER_LONG-1);
435
436 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
437
438 bitidx += end_bitidx;
439 mask <<= (BITS_PER_LONG - bitidx - 1);
440 flags <<= (BITS_PER_LONG - bitidx - 1);
441
442 word = READ_ONCE(bitmap[word_bitidx]);
443 for (;;) {
444 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
445 if (word == old_word)
446 break;
447 word = old_word;
448 }
449}
450
451void set_pageblock_migratetype(struct page *page, int migratetype)
452{
453 if (unlikely(page_group_by_mobility_disabled &&
454 migratetype < MIGRATE_PCPTYPES))
455 migratetype = MIGRATE_UNMOVABLE;
456
457 set_pageblock_flags_group(page, (unsigned long)migratetype,
458 PB_migrate, PB_migrate_end);
459}
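
/*
 * Note on the layout assumed by the helpers above: each pageblock owns
 * NR_PAGEBLOCK_BITS (4) consecutive bits in the pageblock bitmap, and the
 * bits PB_migrate..PB_migrate_end within that group hold the migratetype.
 * As a purely illustrative example, if pageblock_order were 9, a pfn's bit
 * index would be (pfn >> 9) * 4 within the section (SPARSEMEM) or zone
 * (FLATMEM) bitmap, exactly as computed by pfn_to_bitidx() above.
 */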
460
461#ifdef CONFIG_DEBUG_VM
462static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
463{
464 int ret = 0;
465 unsigned seq;
466 unsigned long pfn = page_to_pfn(page);
467 unsigned long sp, start_pfn;
468
469 do {
470 seq = zone_span_seqbegin(zone);
471 start_pfn = zone->zone_start_pfn;
472 sp = zone->spanned_pages;
473 if (!zone_spans_pfn(zone, pfn))
474 ret = 1;
475 } while (zone_span_seqretry(zone, seq));
476
477 if (ret)
478 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
479 pfn, zone_to_nid(zone), zone->name,
480 start_pfn, start_pfn + sp);
481
482 return ret;
483}
484
485static int page_is_consistent(struct zone *zone, struct page *page)
486{
487 if (!pfn_valid_within(page_to_pfn(page)))
488 return 0;
489 if (zone != page_zone(page))
490 return 0;
491
492 return 1;
493}
494
495
496
497static int bad_range(struct zone *zone, struct page *page)
498{
499 if (page_outside_zone_boundaries(zone, page))
500 return 1;
501 if (!page_is_consistent(zone, page))
502 return 1;
503
504 return 0;
505}
506#else
507static inline int bad_range(struct zone *zone, struct page *page)
508{
509 return 0;
510}
511#endif
512
513static void bad_page(struct page *page, const char *reason,
514 unsigned long bad_flags)
515{
516 static unsigned long resume;
517 static unsigned long nr_shown;
518 static unsigned long nr_unshown;
519
	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
524 if (nr_shown == 60) {
525 if (time_before(jiffies, resume)) {
526 nr_unshown++;
527 goto out;
528 }
529 if (nr_unshown) {
530 pr_alert(
531 "BUG: Bad page state: %lu messages suppressed\n",
532 nr_unshown);
533 nr_unshown = 0;
534 }
535 nr_shown = 0;
536 }
537 if (nr_shown++ == 0)
538 resume = jiffies + 60 * HZ;
539
540 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
541 current->comm, page_to_pfn(page));
542 __dump_page(page, reason);
543 bad_flags &= page->flags;
544 if (bad_flags)
545 pr_alert("bad because of flags: %#lx(%pGp)\n",
546 bad_flags, &bad_flags);
547 dump_page_owner(page);
548
549 print_modules();
550 dump_stack();
551out:
552
553 page_mapcount_reset(page);
554 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
555}
556
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit zero of page->compound_head; the rest of the bits point to the head
 * page.
 *
 * The first tail page's ->compound_dtor holds the compound destructor, which
 * is invoked when the compound page is finally freed.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
572void free_compound_page(struct page *page)
573{
574 __free_pages_ok(page, compound_order(page));
575}
576
577void prep_compound_page(struct page *page, unsigned int order)
578{
579 int i;
580 int nr_pages = 1 << order;
581
582 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
583 set_compound_order(page, order);
584 __SetPageHead(page);
585 for (i = 1; i < nr_pages; i++) {
586 struct page *p = page + i;
587 set_page_count(p, 0);
588 p->mapping = TAIL_MAPPING;
589 set_compound_head(p, page);
590 }
591 atomic_set(compound_mapcount_ptr(page), -1);
592}
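
/*
 * Example: an order-2 __GFP_COMP allocation is prepped here into one head
 * page with PG_head set followed by three tail pages whose compound_head
 * points back at the head (bit 0 set marks them as tails). compound_order()
 * on the head then reports 2, and free_compound_page() above releases all
 * four pages in a single __free_pages_ok() call.
 */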
593
594#ifdef CONFIG_DEBUG_PAGEALLOC
595unsigned int _debug_guardpage_minorder;
596bool _debug_pagealloc_enabled __read_mostly
597 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
598EXPORT_SYMBOL(_debug_pagealloc_enabled);
599bool _debug_guardpage_enabled __read_mostly;
600
601static int __init early_debug_pagealloc(char *buf)
602{
603 if (!buf)
604 return -EINVAL;
605 return kstrtobool(buf, &_debug_pagealloc_enabled);
606}
607early_param("debug_pagealloc", early_debug_pagealloc);
608
609static bool need_debug_guardpage(void)
610{
611
612 if (!debug_pagealloc_enabled())
613 return false;
614
615 if (!debug_guardpage_minorder())
616 return false;
617
618 return true;
619}
620
621static void init_debug_guardpage(void)
622{
623 if (!debug_pagealloc_enabled())
624 return;
625
626 if (!debug_guardpage_minorder())
627 return;
628
629 _debug_guardpage_enabled = true;
630}
631
632struct page_ext_operations debug_guardpage_ops = {
633 .need = need_debug_guardpage,
634 .init = init_debug_guardpage,
635};
636
637static int __init debug_guardpage_minorder_setup(char *buf)
638{
639 unsigned long res;
640
641 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
642 pr_err("Bad debug_guardpage_minorder value\n");
643 return 0;
644 }
645 _debug_guardpage_minorder = res;
646 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
647 return 0;
648}
649early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
650
651static inline bool set_page_guard(struct zone *zone, struct page *page,
652 unsigned int order, int migratetype)
653{
654 struct page_ext *page_ext;
655
656 if (!debug_guardpage_enabled())
657 return false;
658
659 if (order >= debug_guardpage_minorder())
660 return false;
661
662 page_ext = lookup_page_ext(page);
663 if (unlikely(!page_ext))
664 return false;
665
666 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
667
668 INIT_LIST_HEAD(&page->lru);
669 set_page_private(page, order);
670
671 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
672
673 return true;
674}
675
676static inline void clear_page_guard(struct zone *zone, struct page *page,
677 unsigned int order, int migratetype)
678{
679 struct page_ext *page_ext;
680
681 if (!debug_guardpage_enabled())
682 return;
683
684 page_ext = lookup_page_ext(page);
685 if (unlikely(!page_ext))
686 return;
687
688 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
689
690 set_page_private(page, 0);
691 if (!is_migrate_isolate(migratetype))
692 __mod_zone_freepage_state(zone, (1 << order), migratetype);
693}
694#else
695struct page_ext_operations debug_guardpage_ops;
696static inline bool set_page_guard(struct zone *zone, struct page *page,
697 unsigned int order, int migratetype) { return false; }
698static inline void clear_page_guard(struct zone *zone, struct page *page,
699 unsigned int order, int migratetype) {}
700#endif
701
702static inline void set_page_order(struct page *page, unsigned int order)
703{
704 set_page_private(page, order);
705 __SetPageBuddy(page);
706}
707
708static inline void rmv_page_order(struct page *page)
709{
710 __ClearPageBuddy(page);
711 set_page_private(page, 0);
712}
713
/*
 * This function checks whether a page is free && is the buddy,
 * i.e. we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy;
 * setting, clearing, and testing it is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
729static inline int page_is_buddy(struct page *page, struct page *buddy,
730 unsigned int order)
731{
732 if (!pfn_valid_within(page_to_pfn(buddy)))
733 return 0;
734
735 if (page_is_guard(buddy) && page_order(buddy) == order) {
736 if (page_zone_id(page) != page_zone_id(buddy))
737 return 0;
738
739 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
740
741 return 1;
742 }
743
744 if (PageBuddy(buddy) && page_order(buddy) == order) {
745
746
747
748
749
750 if (page_zone_id(page) != page_zone_id(buddy))
751 return 0;
752
753 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
754
755 return 1;
756 }
757 return 0;
758}
759
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PageBuddy.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
785static inline void __free_one_page(struct page *page,
786 unsigned long pfn,
787 struct zone *zone, unsigned int order,
788 int migratetype)
789{
790 unsigned long page_idx;
791 unsigned long combined_idx;
792 unsigned long uninitialized_var(buddy_idx);
793 struct page *buddy;
794 unsigned int max_order;
795
796 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
797
798 VM_BUG_ON(!zone_is_initialized(zone));
799 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
800
801 VM_BUG_ON(migratetype == -1);
802 if (likely(!is_migrate_isolate(migratetype)))
803 __mod_zone_freepage_state(zone, 1 << order, migratetype);
804
805 page_idx = pfn & ((1 << MAX_ORDER) - 1);
806
807 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
808 VM_BUG_ON_PAGE(bad_range(zone, page), page);
809
810continue_merging:
811 while (order < max_order - 1) {
812 buddy_idx = __find_buddy_index(page_idx, order);
813 buddy = page + (buddy_idx - page_idx);
814 if (!page_is_buddy(page, buddy, order))
815 goto done_merging;
816
817
818
819
820 if (page_is_guard(buddy)) {
821 clear_page_guard(zone, buddy, order, migratetype);
822 } else {
823 list_del(&buddy->lru);
824 zone->free_area[order].nr_free--;
825 rmv_page_order(buddy);
826 }
827 combined_idx = buddy_idx & page_idx;
828 page = page + (combined_idx - page_idx);
829 page_idx = combined_idx;
830 order++;
831 }
832 if (max_order < MAX_ORDER) {
833
834
835
836
837
838
839
840
841 if (unlikely(has_isolate_pageblock(zone))) {
842 int buddy_mt;
843
844 buddy_idx = __find_buddy_index(page_idx, order);
845 buddy = page + (buddy_idx - page_idx);
846 buddy_mt = get_pageblock_migratetype(buddy);
847
848 if (migratetype != buddy_mt
849 && (is_migrate_isolate(migratetype) ||
850 is_migrate_isolate(buddy_mt)))
851 goto done_merging;
852 }
853 max_order++;
854 goto continue_merging;
855 }
856
857done_merging:
858 set_page_order(page, order);
859
860
861
862
863
864
865
866
867
868 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
869 struct page *higher_page, *higher_buddy;
870 combined_idx = buddy_idx & page_idx;
871 higher_page = page + (combined_idx - page_idx);
872 buddy_idx = __find_buddy_index(combined_idx, order + 1);
873 higher_buddy = higher_page + (buddy_idx - combined_idx);
874 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
875 list_add_tail(&page->lru,
876 &zone->free_area[order].free_list[migratetype]);
877 goto out;
878 }
879 }
880
881 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
882out:
883 zone->free_area[order].nr_free++;
884}
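
/*
 * Worked example of the merging arithmetic above, assuming the usual
 * __find_buddy_index() definition of page_idx ^ (1 << order) from internal.h:
 * for page_idx 12 at order 2 the buddy index is 12 ^ 4 = 8, and
 * combined_idx = 8 & 12 = 8, so the merged order-3 block starts at the lower
 * of the two buddies, as expected.
 */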
885
886
887
888
889
890
891static inline bool page_expected_state(struct page *page,
892 unsigned long check_flags)
893{
894 if (unlikely(atomic_read(&page->_mapcount) != -1))
895 return false;
896
897 if (unlikely((unsigned long)page->mapping |
898 page_ref_count(page) |
899#ifdef CONFIG_MEMCG
900 (unsigned long)page->mem_cgroup |
901#endif
902 (page->flags & check_flags)))
903 return false;
904
905 return true;
906}
907
908static void free_pages_check_bad(struct page *page)
909{
910 const char *bad_reason;
911 unsigned long bad_flags;
912
913 bad_reason = NULL;
914 bad_flags = 0;
915
916 if (unlikely(atomic_read(&page->_mapcount) != -1))
917 bad_reason = "nonzero mapcount";
918 if (unlikely(page->mapping != NULL))
919 bad_reason = "non-NULL mapping";
920 if (unlikely(page_ref_count(page) != 0))
921 bad_reason = "nonzero _refcount";
922 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
923 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
924 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
925 }
926#ifdef CONFIG_MEMCG
927 if (unlikely(page->mem_cgroup))
928 bad_reason = "page still charged to cgroup";
929#endif
930 bad_page(page, bad_reason, bad_flags);
931}
932
933static inline int free_pages_check(struct page *page)
934{
935 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
936 return 0;
937
938
939 free_pages_check_bad(page);
940 return 1;
941}
942
943static int free_tail_pages_check(struct page *head_page, struct page *page)
944{
945 int ret = 1;
946
947
948
949
950
951 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
952
953 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
954 ret = 0;
955 goto out;
956 }
957 switch (page - head_page) {
958 case 1:
959
960 if (unlikely(compound_mapcount(page))) {
961 bad_page(page, "nonzero compound_mapcount", 0);
962 goto out;
963 }
964 break;
965 case 2:
966
967
968
969
970 break;
971 default:
972 if (page->mapping != TAIL_MAPPING) {
973 bad_page(page, "corrupted mapping in tail page", 0);
974 goto out;
975 }
976 break;
977 }
978 if (unlikely(!PageTail(page))) {
979 bad_page(page, "PageTail not set", 0);
980 goto out;
981 }
982 if (unlikely(compound_head(page) != head_page)) {
983 bad_page(page, "compound_head not consistent", 0);
984 goto out;
985 }
986 ret = 0;
987out:
988 page->mapping = NULL;
989 clear_compound_head(page);
990 return ret;
991}
992
993static __always_inline bool free_pages_prepare(struct page *page,
994 unsigned int order, bool check_free)
995{
996 int bad = 0;
997
998 VM_BUG_ON_PAGE(PageTail(page), page);
999
1000 trace_mm_page_free(page, order);
1001 kmemcheck_free_shadow(page, order);
1002
1003
1004
1005
1006
1007 if (unlikely(order)) {
1008 bool compound = PageCompound(page);
1009 int i;
1010
1011 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1012
1013 if (compound)
1014 ClearPageDoubleMap(page);
1015 for (i = 1; i < (1 << order); i++) {
1016 if (compound)
1017 bad += free_tail_pages_check(page, page + i);
1018 if (unlikely(free_pages_check(page + i))) {
1019 bad++;
1020 continue;
1021 }
1022 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1023 }
1024 }
1025 if (PageMappingFlags(page))
1026 page->mapping = NULL;
1027 if (memcg_kmem_enabled() && PageKmemcg(page))
1028 memcg_kmem_uncharge(page, order);
1029 if (check_free)
1030 bad += free_pages_check(page);
1031 if (bad)
1032 return false;
1033
1034 page_cpupid_reset_last(page);
1035 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1036 reset_page_owner(page, order);
1037
1038 if (!PageHighMem(page)) {
1039 debug_check_no_locks_freed(page_address(page),
1040 PAGE_SIZE << order);
1041 debug_check_no_obj_freed(page_address(page),
1042 PAGE_SIZE << order);
1043 }
1044 arch_free_page(page, order);
1045 kernel_poison_pages(page, 1 << order, 0);
1046 kernel_map_pages(page, 1 << order, 0);
1047 kasan_free_pages(page, order);
1048
1049 return true;
1050}
1051
1052#ifdef CONFIG_DEBUG_VM
1053static inline bool free_pcp_prepare(struct page *page)
1054{
1055 return free_pages_prepare(page, 0, true);
1056}
1057
1058static inline bool bulkfree_pcp_prepare(struct page *page)
1059{
1060 return false;
1061}
1062#else
1063static bool free_pcp_prepare(struct page *page)
1064{
1065 return free_pages_prepare(page, 0, false);
1066}
1067
1068static bool bulkfree_pcp_prepare(struct page *page)
1069{
1070 return free_pages_check(page);
1071}
1072#endif
1073
/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of the same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
1085static void free_pcppages_bulk(struct zone *zone, int count,
1086 struct per_cpu_pages *pcp)
1087{
1088 int migratetype = 0;
1089 int batch_free = 0;
1090 unsigned long nr_scanned;
1091 bool isolated_pageblocks;
1092
1093 spin_lock(&zone->lock);
1094 isolated_pageblocks = has_isolate_pageblock(zone);
1095 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1096 if (nr_scanned)
1097 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
1098
1099 while (count) {
1100 struct page *page;
1101 struct list_head *list;
1102
1103
1104
1105
1106
1107
1108
1109
1110 do {
1111 batch_free++;
1112 if (++migratetype == MIGRATE_PCPTYPES)
1113 migratetype = 0;
1114 list = &pcp->lists[migratetype];
1115 } while (list_empty(list));
1116
1117
1118 if (batch_free == MIGRATE_PCPTYPES)
1119 batch_free = count;
1120
1121 do {
1122 int mt;
1123
1124 page = list_last_entry(list, struct page, lru);
1125
1126 list_del(&page->lru);
1127
1128 mt = get_pcppage_migratetype(page);
1129
1130 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1131
1132 if (unlikely(isolated_pageblocks))
1133 mt = get_pageblock_migratetype(page);
1134
1135 if (bulkfree_pcp_prepare(page))
1136 continue;
1137
1138 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
1139 trace_mm_page_pcpu_drain(page, 0, mt);
1140 } while (--count && --batch_free && !list_empty(list));
1141 }
1142 spin_unlock(&zone->lock);
1143}
1144
1145static void free_one_page(struct zone *zone,
1146 struct page *page, unsigned long pfn,
1147 unsigned int order,
1148 int migratetype)
1149{
1150 unsigned long nr_scanned;
1151 spin_lock(&zone->lock);
1152 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1153 if (nr_scanned)
1154 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
1155
1156 if (unlikely(has_isolate_pageblock(zone) ||
1157 is_migrate_isolate(migratetype))) {
1158 migratetype = get_pfnblock_migratetype(page, pfn);
1159 }
1160 __free_one_page(page, pfn, zone, order, migratetype);
1161 spin_unlock(&zone->lock);
1162}
1163
1164static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1165 unsigned long zone, int nid)
1166{
1167 set_page_links(page, zone, nid, pfn);
1168 init_page_count(page);
1169 page_mapcount_reset(page);
1170 page_cpupid_reset_last(page);
1171
1172 INIT_LIST_HEAD(&page->lru);
1173#ifdef WANT_PAGE_VIRTUAL
1174
1175 if (!is_highmem_idx(zone))
1176 set_page_address(page, __va(pfn << PAGE_SHIFT));
1177#endif
1178}
1179
1180static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1181 int nid)
1182{
1183 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1184}
1185
1186#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1187static void init_reserved_page(unsigned long pfn)
1188{
1189 pg_data_t *pgdat;
1190 int nid, zid;
1191
1192 if (!early_page_uninitialised(pfn))
1193 return;
1194
1195 nid = early_pfn_to_nid(pfn);
1196 pgdat = NODE_DATA(nid);
1197
1198 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1199 struct zone *zone = &pgdat->node_zones[zid];
1200
1201 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1202 break;
1203 }
1204 __init_single_pfn(pfn, zid, nid);
1205}
1206#else
1207static inline void init_reserved_page(unsigned long pfn)
1208{
1209}
1210#endif
1211
/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
1218void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1219{
1220 unsigned long start_pfn = PFN_DOWN(start);
1221 unsigned long end_pfn = PFN_UP(end);
1222
1223 for (; start_pfn < end_pfn; start_pfn++) {
1224 if (pfn_valid(start_pfn)) {
1225 struct page *page = pfn_to_page(start_pfn);
1226
1227 init_reserved_page(start_pfn);
1228
1229
1230 INIT_LIST_HEAD(&page->lru);
1231
1232 SetPageReserved(page);
1233 }
1234 }
1235}
1236
1237static void __free_pages_ok(struct page *page, unsigned int order)
1238{
1239 unsigned long flags;
1240 int migratetype;
1241 unsigned long pfn = page_to_pfn(page);
1242
1243 if (!free_pages_prepare(page, order, true))
1244 return;
1245
1246 migratetype = get_pfnblock_migratetype(page, pfn);
1247 local_irq_save(flags);
1248 __count_vm_events(PGFREE, 1 << order);
1249 free_one_page(page_zone(page), page, pfn, order, migratetype);
1250 local_irq_restore(flags);
1251}
1252
1253static void __init __free_pages_boot_core(struct page *page, unsigned int order)
1254{
1255 unsigned int nr_pages = 1 << order;
1256 struct page *p = page;
1257 unsigned int loop;
1258
1259 prefetchw(p);
1260 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1261 prefetchw(p + 1);
1262 __ClearPageReserved(p);
1263 set_page_count(p, 0);
1264 }
1265 __ClearPageReserved(p);
1266 set_page_count(p, 0);
1267
1268 page_zone(page)->managed_pages += nr_pages;
1269 set_page_refcounted(page);
1270 __free_pages(page, order);
1271}
1272
1273#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1274 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1275
1276static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1277
1278int __meminit early_pfn_to_nid(unsigned long pfn)
1279{
1280 static DEFINE_SPINLOCK(early_pfn_lock);
1281 int nid;
1282
1283 spin_lock(&early_pfn_lock);
1284 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1285 if (nid < 0)
1286 nid = first_online_node;
1287 spin_unlock(&early_pfn_lock);
1288
1289 return nid;
1290}
1291#endif
1292
1293#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1294static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1295 struct mminit_pfnnid_cache *state)
1296{
1297 int nid;
1298
1299 nid = __early_pfn_to_nid(pfn, state);
1300 if (nid >= 0 && nid != node)
1301 return false;
1302 return true;
1303}
1304
1305
1306static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1307{
1308 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1309}
1310
1311#else
1312
1313static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1314{
1315 return true;
1316}
1317static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1318 struct mminit_pfnnid_cache *state)
1319{
1320 return true;
1321}
1322#endif
1323
1324
1325void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1326 unsigned int order)
1327{
1328 if (early_page_uninitialised(pfn))
1329 return;
1330 return __free_pages_boot_core(page, order);
1331}
1332
/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration and free scanners of compaction. The scanners then need
 * to use only pfn_valid_within() checks for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
1350struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1351 unsigned long end_pfn, struct zone *zone)
1352{
1353 struct page *start_page;
1354 struct page *end_page;
1355
1356
1357 end_pfn--;
1358
1359 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1360 return NULL;
1361
1362 start_page = pfn_to_page(start_pfn);
1363
1364 if (page_zone(start_page) != zone)
1365 return NULL;
1366
1367 end_page = pfn_to_page(end_pfn);
1368
1369
1370 if (page_zone_id(start_page) != page_zone_id(end_page))
1371 return NULL;
1372
1373 return start_page;
1374}
1375
1376void set_zone_contiguous(struct zone *zone)
1377{
1378 unsigned long block_start_pfn = zone->zone_start_pfn;
1379 unsigned long block_end_pfn;
1380
1381 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1382 for (; block_start_pfn < zone_end_pfn(zone);
1383 block_start_pfn = block_end_pfn,
1384 block_end_pfn += pageblock_nr_pages) {
1385
1386 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1387
1388 if (!__pageblock_pfn_to_page(block_start_pfn,
1389 block_end_pfn, zone))
1390 return;
1391 }
1392
1393
1394 zone->contiguous = true;
1395}
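
/*
 * Note: zone->contiguous set here allows callers (e.g. compaction's
 * pageblock_pfn_to_page() fast path) to skip the per-pageblock
 * __pageblock_pfn_to_page() checks when the whole zone is known to contain
 * no holes or foreign-zone pages.
 */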
1396
1397void clear_zone_contiguous(struct zone *zone)
1398{
1399 zone->contiguous = false;
1400}
1401
1402#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1403static void __init deferred_free_range(struct page *page,
1404 unsigned long pfn, int nr_pages)
1405{
1406 int i;
1407
1408 if (!page)
1409 return;
1410
1411
1412 if (nr_pages == pageblock_nr_pages &&
1413 (pfn & (pageblock_nr_pages - 1)) == 0) {
1414 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1415 __free_pages_boot_core(page, pageblock_order);
1416 return;
1417 }
1418
1419 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1420 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1421 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1422 __free_pages_boot_core(page, 0);
1423 }
1424}
1425
1426
1427static atomic_t pgdat_init_n_undone __initdata;
1428static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1429
1430static inline void __init pgdat_init_report_one_done(void)
1431{
1432 if (atomic_dec_and_test(&pgdat_init_n_undone))
1433 complete(&pgdat_init_all_done_comp);
1434}
1435
1436
1437static int __init deferred_init_memmap(void *data)
1438{
1439 pg_data_t *pgdat = data;
1440 int nid = pgdat->node_id;
1441 struct mminit_pfnnid_cache nid_init_state = { };
1442 unsigned long start = jiffies;
1443 unsigned long nr_pages = 0;
1444 unsigned long walk_start, walk_end;
1445 int i, zid;
1446 struct zone *zone;
1447 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1448 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1449
1450 if (first_init_pfn == ULONG_MAX) {
1451 pgdat_init_report_one_done();
1452 return 0;
1453 }
1454
1455
1456 if (!cpumask_empty(cpumask))
1457 set_cpus_allowed_ptr(current, cpumask);
1458
1459
1460 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1461 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1462 pgdat->first_deferred_pfn = ULONG_MAX;
1463
1464
1465 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1466 zone = pgdat->node_zones + zid;
1467 if (first_init_pfn < zone_end_pfn(zone))
1468 break;
1469 }
1470
1471 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1472 unsigned long pfn, end_pfn;
1473 struct page *page = NULL;
1474 struct page *free_base_page = NULL;
1475 unsigned long free_base_pfn = 0;
1476 int nr_to_free = 0;
1477
1478 end_pfn = min(walk_end, zone_end_pfn(zone));
1479 pfn = first_init_pfn;
1480 if (pfn < walk_start)
1481 pfn = walk_start;
1482 if (pfn < zone->zone_start_pfn)
1483 pfn = zone->zone_start_pfn;
1484
1485 for (; pfn < end_pfn; pfn++) {
1486 if (!pfn_valid_within(pfn))
1487 goto free_range;
1488
1489
1490
1491
1492
1493 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
1494 if (!pfn_valid(pfn)) {
1495 page = NULL;
1496 goto free_range;
1497 }
1498 }
1499
1500 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1501 page = NULL;
1502 goto free_range;
1503 }
1504
1505
1506 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
1507 page++;
1508 } else {
1509 nr_pages += nr_to_free;
1510 deferred_free_range(free_base_page,
1511 free_base_pfn, nr_to_free);
1512 free_base_page = NULL;
1513 free_base_pfn = nr_to_free = 0;
1514
1515 page = pfn_to_page(pfn);
1516 cond_resched();
1517 }
1518
1519 if (page->flags) {
1520 VM_BUG_ON(page_zone(page) != zone);
1521 goto free_range;
1522 }
1523
1524 __init_single_page(page, pfn, zid, nid);
1525 if (!free_base_page) {
1526 free_base_page = page;
1527 free_base_pfn = pfn;
1528 nr_to_free = 0;
1529 }
1530 nr_to_free++;
1531
1532
1533 continue;
1534free_range:
1535
1536 nr_pages += nr_to_free;
1537 deferred_free_range(free_base_page, free_base_pfn,
1538 nr_to_free);
1539 free_base_page = NULL;
1540 free_base_pfn = nr_to_free = 0;
1541 }
1542
1543 nr_pages += nr_to_free;
1544 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
1545
1546 first_init_pfn = max(end_pfn, first_init_pfn);
1547 }
1548
1549
1550 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1551
1552 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1553 jiffies_to_msecs(jiffies - start));
1554
1555 pgdat_init_report_one_done();
1556 return 0;
1557}
1558#endif
1559
1560void __init page_alloc_init_late(void)
1561{
1562 struct zone *zone;
1563
1564#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1565 int nid;
1566
1567
1568 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1569 for_each_node_state(nid, N_MEMORY) {
1570 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1571 }
1572
1573
1574 wait_for_completion(&pgdat_init_all_done_comp);
1575
1576
1577 files_maxfiles_init();
1578#endif
1579
1580 for_each_populated_zone(zone)
1581 set_zone_contiguous(zone);
1582}
1583
1584#ifdef CONFIG_CMA
/* Free a whole pageblock and set its migration type to MIGRATE_CMA. */
1586void __init init_cma_reserved_pageblock(struct page *page)
1587{
1588 unsigned i = pageblock_nr_pages;
1589 struct page *p = page;
1590
1591 do {
1592 __ClearPageReserved(p);
1593 set_page_count(p, 0);
1594 } while (++p, --i);
1595
1596 set_pageblock_migratetype(page, MIGRATE_CMA);
1597
1598 if (pageblock_order >= MAX_ORDER) {
1599 i = pageblock_nr_pages;
1600 p = page;
1601 do {
1602 set_page_refcounted(p);
1603 __free_pages(p, MAX_ORDER - 1);
1604 p += MAX_ORDER_NR_PAGES;
1605 } while (i -= MAX_ORDER_NR_PAGES);
1606 } else {
1607 set_page_refcounted(page);
1608 __free_pages(page, pageblock_order);
1609 }
1610
1611 adjust_managed_page_count(page, pageblock_nr_pages);
1612}
1613#endif
1614
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
1629static inline void expand(struct zone *zone, struct page *page,
1630 int low, int high, struct free_area *area,
1631 int migratetype)
1632{
1633 unsigned long size = 1 << high;
1634
1635 while (high > low) {
1636 area--;
1637 high--;
1638 size >>= 1;
1639 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1640
1641
1642
1643
1644
1645
1646
1647 if (set_page_guard(zone, &page[size], high, migratetype))
1648 continue;
1649
1650 list_add(&page[size].lru, &area->free_list[migratetype]);
1651 area->nr_free++;
1652 set_page_order(&page[size], high);
1653 }
1654}
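
/*
 * Example of the subdivision above: to satisfy an order-0 request from an
 * order-3 free block, expand() splits off the upper half on each pass and
 * re-lists it as a free block of order 2, 1 and 0 respectively; the
 * remaining order-0 page at the start of the block is what the caller
 * hands out.
 */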
1655
1656static void check_new_page_bad(struct page *page)
1657{
1658 const char *bad_reason = NULL;
1659 unsigned long bad_flags = 0;
1660
1661 if (unlikely(atomic_read(&page->_mapcount) != -1))
1662 bad_reason = "nonzero mapcount";
1663 if (unlikely(page->mapping != NULL))
1664 bad_reason = "non-NULL mapping";
1665 if (unlikely(page_ref_count(page) != 0))
1666 bad_reason = "nonzero _count";
1667 if (unlikely(page->flags & __PG_HWPOISON)) {
1668 bad_reason = "HWPoisoned (hardware-corrupted)";
1669 bad_flags = __PG_HWPOISON;
1670
1671 page_mapcount_reset(page);
1672 return;
1673 }
1674 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1675 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1676 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1677 }
1678#ifdef CONFIG_MEMCG
1679 if (unlikely(page->mem_cgroup))
1680 bad_reason = "page still charged to cgroup";
1681#endif
1682 bad_page(page, bad_reason, bad_flags);
1683}
1684
1685
1686
1687
1688static inline int check_new_page(struct page *page)
1689{
1690 if (likely(page_expected_state(page,
1691 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1692 return 0;
1693
1694 check_new_page_bad(page);
1695 return 1;
1696}
1697
1698static inline bool free_pages_prezeroed(bool poisoned)
1699{
1700 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1701 page_poisoning_enabled() && poisoned;
1702}
1703
1704#ifdef CONFIG_DEBUG_VM
1705static bool check_pcp_refill(struct page *page)
1706{
1707 return false;
1708}
1709
1710static bool check_new_pcp(struct page *page)
1711{
1712 return check_new_page(page);
1713}
1714#else
1715static bool check_pcp_refill(struct page *page)
1716{
1717 return check_new_page(page);
1718}
1719static bool check_new_pcp(struct page *page)
1720{
1721 return false;
1722}
1723#endif
1724
1725static bool check_new_pages(struct page *page, unsigned int order)
1726{
1727 int i;
1728 for (i = 0; i < (1 << order); i++) {
1729 struct page *p = page + i;
1730
1731 if (unlikely(check_new_page(p)))
1732 return true;
1733 }
1734
1735 return false;
1736}
1737
1738inline void post_alloc_hook(struct page *page, unsigned int order,
1739 gfp_t gfp_flags)
1740{
1741 set_page_private(page, 0);
1742 set_page_refcounted(page);
1743
1744 arch_alloc_page(page, order);
1745 kernel_map_pages(page, 1 << order, 1);
1746 kernel_poison_pages(page, 1 << order, 1);
1747 kasan_alloc_pages(page, order);
1748 set_page_owner(page, order, gfp_flags);
1749}
1750
1751static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1752 unsigned int alloc_flags)
1753{
1754 int i;
1755 bool poisoned = true;
1756
1757 for (i = 0; i < (1 << order); i++) {
1758 struct page *p = page + i;
1759 if (poisoned)
1760 poisoned &= page_is_poisoned(p);
1761 }
1762
1763 post_alloc_hook(page, order, gfp_flags);
1764
1765 if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
1766 for (i = 0; i < (1 << order); i++)
1767 clear_highpage(page + i);
1768
1769 if (order && (gfp_flags & __GFP_COMP))
1770 prep_compound_page(page, order);
1771
1772
1773
1774
1775
1776
1777
1778 if (alloc_flags & ALLOC_NO_WATERMARKS)
1779 set_page_pfmemalloc(page);
1780 else
1781 clear_page_pfmemalloc(page);
1782}
1783
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists.
 */
1788static inline
1789struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1790 int migratetype)
1791{
1792 unsigned int current_order;
1793 struct free_area *area;
1794 struct page *page;
1795
1796
1797 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1798 area = &(zone->free_area[current_order]);
1799 page = list_first_entry_or_null(&area->free_list[migratetype],
1800 struct page, lru);
1801 if (!page)
1802 continue;
1803 list_del(&page->lru);
1804 rmv_page_order(page);
1805 area->nr_free--;
1806 expand(zone, page, order, current_order, area, migratetype);
1807 set_pcppage_migratetype(page, migratetype);
1808 return page;
1809 }
1810
1811 return NULL;
1812}
1813

/*
 * This array describes the order in which free lists are fallen back to
 * when the free lists for the desired migrate type are depleted.
 */
1819static int fallbacks[MIGRATE_TYPES][4] = {
1820 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1821 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1822 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
1823#ifdef CONFIG_CMA
1824 [MIGRATE_CMA] = { MIGRATE_TYPES },
1825#endif
1826#ifdef CONFIG_MEMORY_ISOLATION
1827 [MIGRATE_ISOLATE] = { MIGRATE_TYPES },
1828#endif
1829};
1830
1831#ifdef CONFIG_CMA
1832static struct page *__rmqueue_cma_fallback(struct zone *zone,
1833 unsigned int order)
1834{
1835 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1836}
1837#else
1838static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1839 unsigned int order) { return NULL; }
1840#endif
1841
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block().
 */
1847int move_freepages(struct zone *zone,
1848 struct page *start_page, struct page *end_page,
1849 int migratetype)
1850{
1851 struct page *page;
1852 unsigned int order;
1853 int pages_moved = 0;
1854
1855#ifndef CONFIG_HOLES_IN_ZONE
1856
1857
1858
1859
1860
1861
1862
1863 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1864#endif
1865
1866 for (page = start_page; page <= end_page;) {
1867
1868 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1869
1870 if (!pfn_valid_within(page_to_pfn(page))) {
1871 page++;
1872 continue;
1873 }
1874
1875 if (!PageBuddy(page)) {
1876 page++;
1877 continue;
1878 }
1879
1880 order = page_order(page);
1881 list_move(&page->lru,
1882 &zone->free_area[order].free_list[migratetype]);
1883 page += 1 << order;
1884 pages_moved += 1 << order;
1885 }
1886
1887 return pages_moved;
1888}
1889
1890int move_freepages_block(struct zone *zone, struct page *page,
1891 int migratetype)
1892{
1893 unsigned long start_pfn, end_pfn;
1894 struct page *start_page, *end_page;
1895
1896 start_pfn = page_to_pfn(page);
1897 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1898 start_page = pfn_to_page(start_pfn);
1899 end_page = start_page + pageblock_nr_pages - 1;
1900 end_pfn = start_pfn + pageblock_nr_pages - 1;
1901
1902
1903 if (!zone_spans_pfn(zone, start_pfn))
1904 start_page = page;
1905 if (!zone_spans_pfn(zone, end_pfn))
1906 return 0;
1907
1908 return move_freepages(zone, start_page, end_page, migratetype);
1909}
1910
1911static void change_pageblock_range(struct page *pageblock_page,
1912 int start_order, int migratetype)
1913{
1914 int nr_pageblocks = 1 << (start_order - pageblock_order);
1915
1916 while (nr_pageblocks--) {
1917 set_pageblock_migratetype(pageblock_page, migratetype);
1918 pageblock_page += pageblock_nr_pages;
1919 }
1920}
1921
/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
1934static bool can_steal_fallback(unsigned int order, int start_mt)
1935{
1936
1937
1938
1939
1940
1941
1942
1943 if (order >= pageblock_order)
1944 return true;
1945
1946 if (order >= pageblock_order / 2 ||
1947 start_mt == MIGRATE_RECLAIMABLE ||
1948 start_mt == MIGRATE_UNMOVABLE ||
1949 page_group_by_mobility_disabled)
1950 return true;
1951
1952 return false;
1953}
1954
1955
1956
1957
1958
1959
1960
1961
1962static void steal_suitable_fallback(struct zone *zone, struct page *page,
1963 int start_type)
1964{
1965 unsigned int current_order = page_order(page);
1966 int pages;
1967
1968
1969 if (current_order >= pageblock_order) {
1970 change_pageblock_range(page, current_order, start_type);
1971 return;
1972 }
1973
1974 pages = move_freepages_block(zone, page, start_type);
1975
1976
1977 if (pages >= (1 << (pageblock_order-1)) ||
1978 page_group_by_mobility_disabled)
1979 set_pageblock_migratetype(page, start_type);
1980}
1981
1982
1983
1984
1985
1986
1987
1988int find_suitable_fallback(struct free_area *area, unsigned int order,
1989 int migratetype, bool only_stealable, bool *can_steal)
1990{
1991 int i;
1992 int fallback_mt;
1993
1994 if (area->nr_free == 0)
1995 return -1;
1996
1997 *can_steal = false;
1998 for (i = 0;; i++) {
1999 fallback_mt = fallbacks[migratetype][i];
2000 if (fallback_mt == MIGRATE_TYPES)
2001 break;
2002
2003 if (list_empty(&area->free_list[fallback_mt]))
2004 continue;
2005
2006 if (can_steal_fallback(order, migratetype))
2007 *can_steal = true;
2008
2009 if (!only_stealable)
2010 return fallback_mt;
2011
2012 if (*can_steal)
2013 return fallback_mt;
2014 }
2015
2016 return -1;
2017}
2018
/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order.
 */
2023static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2024 unsigned int alloc_order)
2025{
2026 int mt;
2027 unsigned long max_managed, flags;
2028
2029
2030
2031
2032
2033 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2034 if (zone->nr_reserved_highatomic >= max_managed)
2035 return;
2036
2037 spin_lock_irqsave(&zone->lock, flags);
2038
2039
2040 if (zone->nr_reserved_highatomic >= max_managed)
2041 goto out_unlock;
2042
2043
2044 mt = get_pageblock_migratetype(page);
2045 if (mt != MIGRATE_HIGHATOMIC &&
2046 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
2047 zone->nr_reserved_highatomic += pageblock_nr_pages;
2048 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2049 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
2050 }
2051
2052out_unlock:
2053 spin_unlock_irqrestore(&zone->lock, flags);
2054}
2055
2056
2057
2058
2059
2060
2061
2062static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
2063{
2064 struct zonelist *zonelist = ac->zonelist;
2065 unsigned long flags;
2066 struct zoneref *z;
2067 struct zone *zone;
2068 struct page *page;
2069 int order;
2070
2071 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2072 ac->nodemask) {
2073
2074 if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
2075 continue;
2076
2077 spin_lock_irqsave(&zone->lock, flags);
2078 for (order = 0; order < MAX_ORDER; order++) {
2079 struct free_area *area = &(zone->free_area[order]);
2080
2081 page = list_first_entry_or_null(
2082 &area->free_list[MIGRATE_HIGHATOMIC],
2083 struct page, lru);
2084 if (!page)
2085 continue;
2086
2087
2088
2089
2090
2091
2092
2093 zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
2094 zone->nr_reserved_highatomic);
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105 set_pageblock_migratetype(page, ac->migratetype);
2106 move_freepages_block(zone, page, ac->migratetype);
2107 spin_unlock_irqrestore(&zone->lock, flags);
2108 return;
2109 }
2110 spin_unlock_irqrestore(&zone->lock, flags);
2111 }
2112}
2113
2114
2115static inline struct page *
2116__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
2117{
2118 struct free_area *area;
2119 unsigned int current_order;
2120 struct page *page;
2121 int fallback_mt;
2122 bool can_steal;
2123
2124
2125 for (current_order = MAX_ORDER-1;
2126 current_order >= order && current_order <= MAX_ORDER-1;
2127 --current_order) {
2128 area = &(zone->free_area[current_order]);
2129 fallback_mt = find_suitable_fallback(area, current_order,
2130 start_migratetype, false, &can_steal);
2131 if (fallback_mt == -1)
2132 continue;
2133
2134 page = list_first_entry(&area->free_list[fallback_mt],
2135 struct page, lru);
2136 if (can_steal)
2137 steal_suitable_fallback(zone, page, start_migratetype);
2138
2139
2140 area->nr_free--;
2141 list_del(&page->lru);
2142 rmv_page_order(page);
2143
2144 expand(zone, page, order, current_order, area,
2145 start_migratetype);
2146
2147
2148
2149
2150
2151
2152
2153 set_pcppage_migratetype(page, start_migratetype);
2154
2155 trace_mm_page_alloc_extfrag(page, order, current_order,
2156 start_migratetype, fallback_mt);
2157
2158 return page;
2159 }
2160
2161 return NULL;
2162}
2163
2164
2165
2166
2167
2168static struct page *__rmqueue(struct zone *zone, unsigned int order,
2169 int migratetype)
2170{
2171 struct page *page;
2172
2173 page = __rmqueue_smallest(zone, order, migratetype);
2174 if (unlikely(!page)) {
2175 if (migratetype == MIGRATE_MOVABLE)
2176 page = __rmqueue_cma_fallback(zone, order);
2177
2178 if (!page)
2179 page = __rmqueue_fallback(zone, order, migratetype);
2180 }
2181
2182 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2183 return page;
2184}
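
/*
 * Note the fallback order implemented above: the requested migratetype is
 * tried first, then (for MIGRATE_MOVABLE requests only) the CMA free lists,
 * and finally another migratetype is raided via __rmqueue_fallback().
 */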
2185
2186
2187
2188
2189
2190
2191static int rmqueue_bulk(struct zone *zone, unsigned int order,
2192 unsigned long count, struct list_head *list,
2193 int migratetype, bool cold)
2194{
2195 int i;
2196
2197 spin_lock(&zone->lock);
2198 for (i = 0; i < count; ++i) {
2199 struct page *page = __rmqueue(zone, order, migratetype);
2200 if (unlikely(page == NULL))
2201 break;
2202
2203 if (unlikely(check_pcp_refill(page)))
2204 continue;
2205
		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the caller's list
		 * and the list head then moves forward. From the caller's
		 * perspective, the linked list is ordered by page number under
		 * some conditions. This is useful for IO devices that can
		 * merge IO requests if the physical pages are ordered
		 * properly.
		 */
2215 if (likely(!cold))
2216 list_add(&page->lru, list);
2217 else
2218 list_add_tail(&page->lru, list);
2219 list = &page->lru;
2220 if (is_migrate_cma(get_pcppage_migratetype(page)))
2221 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2222 -(1 << order));
2223 }
2224 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2225 spin_unlock(&zone->lock);
2226 return i;
2227}
2228
2229#ifdef CONFIG_NUMA
2230
2231
2232
2233
2234
2235
2236
2237
2238void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2239{
2240 unsigned long flags;
2241 int to_drain, batch;
2242
2243 local_irq_save(flags);
2244 batch = READ_ONCE(pcp->batch);
2245 to_drain = min(pcp->count, batch);
2246 if (to_drain > 0) {
2247 free_pcppages_bulk(zone, to_drain, pcp);
2248 pcp->count -= to_drain;
2249 }
2250 local_irq_restore(flags);
2251}
2252#endif
2253
2254
2255
2256
2257
2258
2259
2260
2261static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2262{
2263 unsigned long flags;
2264 struct per_cpu_pageset *pset;
2265 struct per_cpu_pages *pcp;
2266
2267 local_irq_save(flags);
2268 pset = per_cpu_ptr(zone->pageset, cpu);
2269
2270 pcp = &pset->pcp;
2271 if (pcp->count) {
2272 free_pcppages_bulk(zone, pcp->count, pcp);
2273 pcp->count = 0;
2274 }
2275 local_irq_restore(flags);
2276}
2277
2278
2279
2280
2281
2282
2283
2284
2285static void drain_pages(unsigned int cpu)
2286{
2287 struct zone *zone;
2288
2289 for_each_populated_zone(zone) {
2290 drain_pages_zone(cpu, zone);
2291 }
2292}
2293
2294
2295
2296
2297
2298
2299
2300void drain_local_pages(struct zone *zone)
2301{
2302 int cpu = smp_processor_id();
2303
2304 if (zone)
2305 drain_pages_zone(cpu, zone);
2306 else
2307 drain_pages(cpu);
2308}
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321void drain_all_pages(struct zone *zone)
2322{
2323 int cpu;
2324
2325
2326
2327
2328
2329 static cpumask_t cpus_with_pcps;
2330
2331
2332
2333
2334
2335
2336
2337 for_each_online_cpu(cpu) {
2338 struct per_cpu_pageset *pcp;
2339 struct zone *z;
2340 bool has_pcps = false;
2341
2342 if (zone) {
2343 pcp = per_cpu_ptr(zone->pageset, cpu);
2344 if (pcp->pcp.count)
2345 has_pcps = true;
2346 } else {
2347 for_each_populated_zone(z) {
2348 pcp = per_cpu_ptr(z->pageset, cpu);
2349 if (pcp->pcp.count) {
2350 has_pcps = true;
2351 break;
2352 }
2353 }
2354 }
2355
2356 if (has_pcps)
2357 cpumask_set_cpu(cpu, &cpus_with_pcps);
2358 else
2359 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2360 }
2361 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
2362 zone, 1);
2363}
2364
2365#ifdef CONFIG_HIBERNATION
2366
2367void mark_free_pages(struct zone *zone)
2368{
2369 unsigned long pfn, max_zone_pfn;
2370 unsigned long flags;
2371 unsigned int order, t;
2372 struct page *page;
2373
2374 if (zone_is_empty(zone))
2375 return;
2376
2377 spin_lock_irqsave(&zone->lock, flags);
2378
2379 max_zone_pfn = zone_end_pfn(zone);
2380 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2381 if (pfn_valid(pfn)) {
2382 page = pfn_to_page(pfn);
2383
2384 if (page_zone(page) != zone)
2385 continue;
2386
2387 if (!swsusp_page_is_forbidden(page))
2388 swsusp_unset_page_free(page);
2389 }
2390
2391 for_each_migratetype_order(order, t) {
2392 list_for_each_entry(page,
2393 &zone->free_area[order].free_list[t], lru) {
2394 unsigned long i;
2395
2396 pfn = page_to_pfn(page);
2397 for (i = 0; i < (1UL << order); i++)
2398 swsusp_set_page_free(pfn_to_page(pfn + i));
2399 }
2400 }
2401 spin_unlock_irqrestore(&zone->lock, flags);
2402}
2403#endif
2404
/*
 * Free a 0-order page.
 * cold == true ? free a cold page : free a hot page
 */
2409void free_hot_cold_page(struct page *page, bool cold)
2410{
2411 struct zone *zone = page_zone(page);
2412 struct per_cpu_pages *pcp;
2413 unsigned long flags;
2414 unsigned long pfn = page_to_pfn(page);
2415 int migratetype;
2416
2417 if (!free_pcp_prepare(page))
2418 return;
2419
2420 migratetype = get_pfnblock_migratetype(page, pfn);
2421 set_pcppage_migratetype(page, migratetype);
2422 local_irq_save(flags);
2423 __count_vm_event(PGFREE);
2424
2425
2426
2427
2428
2429
2430
2431
2432 if (migratetype >= MIGRATE_PCPTYPES) {
2433 if (unlikely(is_migrate_isolate(migratetype))) {
2434 free_one_page(zone, page, pfn, 0, migratetype);
2435 goto out;
2436 }
2437 migratetype = MIGRATE_MOVABLE;
2438 }
2439
2440 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2441 if (!cold)
2442 list_add(&page->lru, &pcp->lists[migratetype]);
2443 else
2444 list_add_tail(&page->lru, &pcp->lists[migratetype]);
2445 pcp->count++;
2446 if (pcp->count >= pcp->high) {
2447 unsigned long batch = READ_ONCE(pcp->batch);
2448 free_pcppages_bulk(zone, batch, pcp);
2449 pcp->count -= batch;
2450 }
2451
2452out:
2453 local_irq_restore(flags);
2454}
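
/*
 * Note: once pcp->count reaches pcp->high, the function above drains a
 * single batch (pcp->batch pages) back to the buddy lists rather than the
 * whole list, bounding the size of the per-cpu cache while keeping it warm.
 */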
2455
2456
2457
2458
2459void free_hot_cold_page_list(struct list_head *list, bool cold)
2460{
2461 struct page *page, *next;
2462
2463 list_for_each_entry_safe(page, next, list, lru) {
2464 trace_mm_page_free_batched(page, cold);
2465 free_hot_cold_page(page, cold);
2466 }
2467}
2468
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
2477void split_page(struct page *page, unsigned int order)
2478{
2479 int i;
2480
2481 VM_BUG_ON_PAGE(PageCompound(page), page);
2482 VM_BUG_ON_PAGE(!page_count(page), page);
2483
2484#ifdef CONFIG_KMEMCHECK
2485
2486
2487
2488
2489 if (kmemcheck_page_is_tracked(page))
2490 split_page(virt_to_page(page[0].shadow), order);
2491#endif
2492
2493 for (i = 1; i < (1 << order); i++)
2494 set_page_refcounted(page + i);
2495 split_page_owner(page, order);
2496}
2497EXPORT_SYMBOL_GPL(split_page);
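
/*
 * Usage sketch for split_page() (hypothetical caller, not code from this
 * file). An order-2, non-__GFP_COMP allocation is split into four
 * independently refcounted order-0 pages, each of which must then be freed
 * on its own:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);
 *	}
 */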
2498
2499int __isolate_free_page(struct page *page, unsigned int order)
2500{
2501 unsigned long watermark;
2502 struct zone *zone;
2503 int mt;
2504
2505 BUG_ON(!PageBuddy(page));
2506
2507 zone = page_zone(page);
2508 mt = get_pageblock_migratetype(page);
2509
2510 if (!is_migrate_isolate(mt)) {
2511
2512
2513
2514
2515
2516
2517 watermark = min_wmark_pages(zone) + (1UL << order);
2518 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2519 return 0;
2520
2521 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2522 }
2523
2524
2525 list_del(&page->lru);
2526 zone->free_area[order].nr_free--;
2527 rmv_page_order(page);
2528
2529
2530
2531
2532
2533 if (order >= pageblock_order - 1) {
2534 struct page *endpage = page + (1 << order) - 1;
2535 for (; page < endpage; page += pageblock_nr_pages) {
2536 int mt = get_pageblock_migratetype(page);
2537 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
2538 set_pageblock_migratetype(page,
2539 MIGRATE_MOVABLE);
2540 }
2541 }
2542
2543
2544 return 1UL << order;
2545}
2546
2547
2548
2549
2550
/*
 * Update NUMA hit/miss statistics.
 *
 * Must be called with interrupts disabled.
 */
2556static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2557 gfp_t flags)
2558{
2559#ifdef CONFIG_NUMA
2560 int local_nid = numa_node_id();
2561 enum zone_stat_item local_stat = NUMA_LOCAL;
2562
2563 if (unlikely(flags & __GFP_OTHER_NODE)) {
2564 local_stat = NUMA_OTHER;
2565 local_nid = preferred_zone->node;
2566 }
2567
2568 if (z->node == local_nid) {
2569 __inc_zone_state(z, NUMA_HIT);
2570 __inc_zone_state(z, local_stat);
2571 } else {
2572 __inc_zone_state(z, NUMA_MISS);
2573 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2574 }
2575#endif
2576}
2577
2578
2579
2580
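/*
 * Allocate a page from the given zone.  Order-0 requests are satisfied from
 * the per-cpu pagelists; higher orders take the zone lock and go directly to
 * the buddy free lists.
 */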
2581static inline
2582struct page *buffered_rmqueue(struct zone *preferred_zone,
2583 struct zone *zone, unsigned int order,
2584 gfp_t gfp_flags, unsigned int alloc_flags,
2585 int migratetype)
2586{
2587 unsigned long flags;
2588 struct page *page;
2589 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2590
2591 if (likely(order == 0)) {
2592 struct per_cpu_pages *pcp;
2593 struct list_head *list;
2594
2595 local_irq_save(flags);
2596 do {
2597 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2598 list = &pcp->lists[migratetype];
2599 if (list_empty(list)) {
2600 pcp->count += rmqueue_bulk(zone, 0,
2601 pcp->batch, list,
2602 migratetype, cold);
2603 if (unlikely(list_empty(list)))
2604 goto failed;
2605 }
2606
2607 if (cold)
2608 page = list_last_entry(list, struct page, lru);
2609 else
2610 page = list_first_entry(list, struct page, lru);
2611
2612 list_del(&page->lru);
2613 pcp->count--;
2614
2615 } while (check_new_pcp(page));
2616 } else {
2617
2618
2619
2620
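		/*
		 * We most definitely don't want callers attempting to
		 * allocate greater than order-1 page units with __GFP_NOFAIL.
		 */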
2621 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2622 spin_lock_irqsave(&zone->lock, flags);
2623
2624 do {
2625 page = NULL;
2626 if (alloc_flags & ALLOC_HARDER) {
2627 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2628 if (page)
2629 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2630 }
2631 if (!page)
2632 page = __rmqueue(zone, order, migratetype);
2633 } while (page && check_new_pages(page, order));
2634 spin_unlock(&zone->lock);
2635 if (!page)
2636 goto failed;
2637 __mod_zone_freepage_state(zone, -(1 << order),
2638 get_pcppage_migratetype(page));
2639 }
2640
2641 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2642 zone_statistics(preferred_zone, zone, gfp_flags);
2643 local_irq_restore(flags);
2644
2645 VM_BUG_ON_PAGE(bad_range(zone, page), page);
2646 return page;
2647
2648failed:
2649 local_irq_restore(flags);
2650 return NULL;
2651}
2652
2653#ifdef CONFIG_FAIL_PAGE_ALLOC
2654
2655static struct {
2656 struct fault_attr attr;
2657
2658 bool ignore_gfp_highmem;
2659 bool ignore_gfp_reclaim;
2660 u32 min_order;
2661} fail_page_alloc = {
2662 .attr = FAULT_ATTR_INITIALIZER,
2663 .ignore_gfp_reclaim = true,
2664 .ignore_gfp_highmem = true,
2665 .min_order = 1,
2666};
2667
2668static int __init setup_fail_page_alloc(char *str)
2669{
2670 return setup_fault_attr(&fail_page_alloc.attr, str);
2671}
2672__setup("fail_page_alloc=", setup_fail_page_alloc);
2673
2674static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2675{
2676 if (order < fail_page_alloc.min_order)
2677 return false;
2678 if (gfp_mask & __GFP_NOFAIL)
2679 return false;
2680 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2681 return false;
2682 if (fail_page_alloc.ignore_gfp_reclaim &&
2683 (gfp_mask & __GFP_DIRECT_RECLAIM))
2684 return false;
2685
2686 return should_fail(&fail_page_alloc.attr, 1 << order);
2687}
2688
2689#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2690
2691static int __init fail_page_alloc_debugfs(void)
2692{
2693 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2694 struct dentry *dir;
2695
2696 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2697 &fail_page_alloc.attr);
2698 if (IS_ERR(dir))
2699 return PTR_ERR(dir);
2700
2701 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2702 &fail_page_alloc.ignore_gfp_reclaim))
2703 goto fail;
2704 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2705 &fail_page_alloc.ignore_gfp_highmem))
2706 goto fail;
2707 if (!debugfs_create_u32("min-order", mode, dir,
2708 &fail_page_alloc.min_order))
2709 goto fail;
2710
2711 return 0;
2712fail:
2713 debugfs_remove_recursive(dir);
2714
2715 return -ENOMEM;
2716}
2717
2718late_initcall(fail_page_alloc_debugfs);
2719
2720#endif
2721
2722#else
2723
2724static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2725{
2726 return false;
2727}
2728
2729#endif
2730
2731
2732
2733
2734
2735
2736
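/*
 * Return true if free base pages are above 'mark'.  For high-order requests
 * this also requires that, once the adjusted order-0 watermark is met, at
 * least one free page of a suitable order and migratetype exists.
 * 'free_pages' is supplied by the caller so fast paths can reuse an
 * already-read counter.
 */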
2737bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2738 int classzone_idx, unsigned int alloc_flags,
2739 long free_pages)
2740{
2741 long min = mark;
2742 int o;
2743 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
2744
2745
2746 free_pages -= (1 << order) - 1;
2747
2748 if (alloc_flags & ALLOC_HIGH)
2749 min -= min / 2;
2750
2751
2752
2753
2754
2755
2756 if (likely(!alloc_harder))
2757 free_pages -= z->nr_reserved_highatomic;
2758 else
2759 min -= min / 4;
2760
2761#ifdef CONFIG_CMA
2762
2763 if (!(alloc_flags & ALLOC_CMA))
2764 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
2765#endif
2766
2767
2768
2769
2770
2771
2772 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
2773 return false;
2774
2775
2776 if (!order)
2777 return true;
2778
2779
2780 for (o = order; o < MAX_ORDER; o++) {
2781 struct free_area *area = &z->free_area[o];
2782 int mt;
2783
2784 if (!area->nr_free)
2785 continue;
2786
2787 if (alloc_harder)
2788 return true;
2789
2790 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2791 if (!list_empty(&area->free_list[mt]))
2792 return true;
2793 }
2794
2795#ifdef CONFIG_CMA
2796 if ((alloc_flags & ALLOC_CMA) &&
2797 !list_empty(&area->free_list[MIGRATE_CMA])) {
2798 return true;
2799 }
2800#endif
2801 }
2802 return false;
2803}
2804
2805bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2806 int classzone_idx, unsigned int alloc_flags)
2807{
2808 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2809 zone_page_state(z, NR_FREE_PAGES));
2810}
2811
2812static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2813 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
2814{
2815 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2816 long cma_pages = 0;
2817
2818#ifdef CONFIG_CMA
2819
2820 if (!(alloc_flags & ALLOC_CMA))
2821 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
2822#endif
2823
2824
2825
2826
2827
2828
2829
2830
2831 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
2832 return true;
2833
2834 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2835 free_pages);
2836}
2837
2838bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
2839 unsigned long mark, int classzone_idx)
2840{
2841 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2842
2843 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2844 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2845
2846 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
2847 free_pages);
2848}
2849
2850#ifdef CONFIG_NUMA
2851static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2852{
2853 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2854 RECLAIM_DISTANCE;
2855}
2856#else
2857static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2858{
2859 return true;
2860}
2861#endif
2862
2863
2864
2865
2866
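/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */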
2867static struct page *
2868get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2869 const struct alloc_context *ac)
2870{
2871 struct zoneref *z = ac->preferred_zoneref;
2872 struct zone *zone;
2873 struct pglist_data *last_pgdat_dirty_limit = NULL;
2874
2875
2876
2877
2878
2879 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
2880 ac->nodemask) {
2881 struct page *page;
2882 unsigned long mark;
2883
2884 if (cpusets_enabled() &&
2885 (alloc_flags & ALLOC_CPUSET) &&
2886 !__cpuset_zone_allowed(zone, gfp_mask))
2887 continue;
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
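		/*
		 * When spreading newly dirtied page cache (__GFP_WRITE in the
		 * fast path), skip nodes that are over their dirty limit so
		 * writes are distributed across the allowed nodes.  Remember
		 * the last node that failed the check so it is not re-tested
		 * for every zone on that node.
		 */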
2907 if (ac->spread_dirty_pages) {
2908 if (last_pgdat_dirty_limit == zone->zone_pgdat)
2909 continue;
2910
2911 if (!node_dirty_ok(zone->zone_pgdat)) {
2912 last_pgdat_dirty_limit = zone->zone_pgdat;
2913 continue;
2914 }
2915 }
2916
2917 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2918 if (!zone_watermark_fast(zone, order, mark,
2919 ac_classzone_idx(ac), alloc_flags)) {
2920 int ret;
2921
2922
2923 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2924 if (alloc_flags & ALLOC_NO_WATERMARKS)
2925 goto try_this_zone;
2926
2927 if (node_reclaim_mode == 0 ||
2928 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
2929 continue;
2930
2931 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
2932 switch (ret) {
2933 case NODE_RECLAIM_NOSCAN:
2934
2935 continue;
2936 case NODE_RECLAIM_FULL:
2937
2938 continue;
2939 default:
2940
2941 if (zone_watermark_ok(zone, order, mark,
2942 ac_classzone_idx(ac), alloc_flags))
2943 goto try_this_zone;
2944
2945 continue;
2946 }
2947 }
2948
2949try_this_zone:
2950 page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
2951 gfp_mask, alloc_flags, ac->migratetype);
2952 if (page) {
2953 prep_new_page(page, order, gfp_mask, alloc_flags);
2954
2955
2956
2957
2958
2959 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
2960 reserve_highatomic_pageblock(page, zone, order);
2961
2962 return page;
2963 }
2964 }
2965
2966 return NULL;
2967}
2968
2969
2970
2971
2972
2973static inline bool should_suppress_show_mem(void)
2974{
2975 bool ret = false;
2976
2977#if NODES_SHIFT > 8
2978 ret = in_interrupt();
2979#endif
2980 return ret;
2981}
2982
2983static DEFINE_RATELIMIT_STATE(nopage_rs,
2984 DEFAULT_RATELIMIT_INTERVAL,
2985 DEFAULT_RATELIMIT_BURST);
2986
2987void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
2988{
2989 unsigned int filter = SHOW_MEM_FILTER_NODES;
2990 struct va_format vaf;
2991 va_list args;
2992
2993 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2994 debug_guardpage_minorder() > 0)
2995 return;
2996
2997
2998
2999
3000
3001
3002 if (!(gfp_mask & __GFP_NOMEMALLOC))
3003 if (test_thread_flag(TIF_MEMDIE) ||
3004 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3005 filter &= ~SHOW_MEM_FILTER_NODES;
3006 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3007 filter &= ~SHOW_MEM_FILTER_NODES;
3008
3009 pr_warn("%s: ", current->comm);
3010
3011 va_start(args, fmt);
3012 vaf.fmt = fmt;
3013 vaf.va = &args;
3014 pr_cont("%pV", &vaf);
3015 va_end(args);
3016
3017 pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
3018
3019 dump_stack();
3020 if (!should_suppress_show_mem())
3021 show_mem(filter);
3022}
3023
3024static inline struct page *
3025__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3026 const struct alloc_context *ac, unsigned long *did_some_progress)
3027{
3028 struct oom_control oc = {
3029 .zonelist = ac->zonelist,
3030 .nodemask = ac->nodemask,
3031 .memcg = NULL,
3032 .gfp_mask = gfp_mask,
3033 .order = order,
3034 };
3035 struct page *page;
3036
3037 *did_some_progress = 0;
3038
3039
3040
3041
3042
3043 if (!mutex_trylock(&oom_lock)) {
3044 *did_some_progress = 1;
3045 schedule_timeout_uninterruptible(1);
3046 return NULL;
3047 }
3048
3049
3050
3051
3052
3053
3054 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3055 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3056 if (page)
3057 goto out;
3058
3059 if (!(gfp_mask & __GFP_NOFAIL)) {
3060
3061 if (current->flags & PF_DUMPCORE)
3062 goto out;
3063
3064 if (order > PAGE_ALLOC_COSTLY_ORDER)
3065 goto out;
3066
3067 if (ac->high_zoneidx < ZONE_NORMAL)
3068 goto out;
3069 if (pm_suspended_storage())
3070 goto out;
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082 if (gfp_mask & __GFP_THISNODE)
3083 goto out;
3084 }
3085
3086 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3087 *did_some_progress = 1;
3088
3089 if (gfp_mask & __GFP_NOFAIL) {
3090 page = get_page_from_freelist(gfp_mask, order,
3091 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
3092
3093
3094
3095
3096 if (!page)
3097 page = get_page_from_freelist(gfp_mask, order,
3098 ALLOC_NO_WATERMARKS, ac);
3099 }
3100 }
3101out:
3102 mutex_unlock(&oom_lock);
3103 return page;
3104}
3105
3106
3107
3108
3109
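/*
 * Maximum number of compaction retries with progress before the OOM killer
 * is considered the only way to make forward progress.
 */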
3110#define MAX_COMPACT_RETRIES 16
3111
3112#ifdef CONFIG_COMPACTION
3113
3114static struct page *
3115__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3116 unsigned int alloc_flags, const struct alloc_context *ac,
3117 enum compact_priority prio, enum compact_result *compact_result)
3118{
3119 struct page *page;
3120
3121 if (!order)
3122 return NULL;
3123
3124 current->flags |= PF_MEMALLOC;
3125 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3126 prio);
3127 current->flags &= ~PF_MEMALLOC;
3128
3129 if (*compact_result <= COMPACT_INACTIVE)
3130 return NULL;
3131
3132
3133
3134
3135
3136 count_vm_event(COMPACTSTALL);
3137
3138 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3139
3140 if (page) {
3141 struct zone *zone = page_zone(page);
3142
3143 zone->compact_blockskip_flush = false;
3144 compaction_defer_reset(zone, order, true);
3145 count_vm_event(COMPACTSUCCESS);
3146 return page;
3147 }
3148
3149
3150
3151
3152
3153 count_vm_event(COMPACTFAIL);
3154
3155 cond_resched();
3156
3157 return NULL;
3158}
3159
3160static inline bool
3161should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3162 enum compact_result compact_result,
3163 enum compact_priority *compact_priority,
3164 int *compaction_retries)
3165{
3166 int max_retries = MAX_COMPACT_RETRIES;
3167 int min_priority;
3168
3169 if (!order)
3170 return false;
3171
3172 if (compaction_made_progress(compact_result))
3173 (*compaction_retries)++;
3174
3175
3176
3177
3178
3179
3180 if (compaction_failed(compact_result))
3181 goto check_priority;
3182
3183
3184
3185
3186
3187
3188
3189 if (compaction_withdrawn(compact_result))
3190 return compaction_zonelist_suitable(ac, order, alloc_flags);
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200 if (order > PAGE_ALLOC_COSTLY_ORDER)
3201 max_retries /= 4;
3202 if (*compaction_retries <= max_retries)
3203 return true;
3204
3205
3206
3207
3208
3209check_priority:
3210 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3211 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3212 if (*compact_priority > min_priority) {
3213 (*compact_priority)--;
3214 *compaction_retries = 0;
3215 return true;
3216 }
3217 return false;
3218}
3219#else
3220static inline struct page *
3221__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3222 unsigned int alloc_flags, const struct alloc_context *ac,
3223 enum compact_priority prio, enum compact_result *compact_result)
3224{
3225 *compact_result = COMPACT_SKIPPED;
3226 return NULL;
3227}
3228
3229static inline bool
3230should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3231 enum compact_result compact_result,
3232 enum compact_priority *compact_priority,
3233 int *compaction_retries)
3234{
3235 struct zone *zone;
3236 struct zoneref *z;
3237
3238 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3239 return false;
3240
3241
3242
3243
3244
3245
3246
3247 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3248 ac->nodemask) {
3249 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3250 ac_classzone_idx(ac), alloc_flags))
3251 return true;
3252 }
3253 return false;
3254}
3255#endif
3256
3257
3258static int
3259__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3260 const struct alloc_context *ac)
3261{
3262 struct reclaim_state reclaim_state;
3263 int progress;
3264
3265 cond_resched();
3266
3267
3268 cpuset_memory_pressure_bump();
3269 current->flags |= PF_MEMALLOC;
3270 lockdep_set_current_reclaim_state(gfp_mask);
3271 reclaim_state.reclaimed_slab = 0;
3272 current->reclaim_state = &reclaim_state;
3273
3274 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3275 ac->nodemask);
3276
3277 current->reclaim_state = NULL;
3278 lockdep_clear_current_reclaim_state();
3279 current->flags &= ~PF_MEMALLOC;
3280
3281 cond_resched();
3282
3283 return progress;
3284}
3285
3286
3287static inline struct page *
3288__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3289 unsigned int alloc_flags, const struct alloc_context *ac,
3290 unsigned long *did_some_progress)
3291{
3292 struct page *page = NULL;
3293 bool drained = false;
3294
3295 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3296 if (unlikely(!(*did_some_progress)))
3297 return NULL;
3298
3299retry:
3300 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3301
3302
3303
3304
3305
3306
3307 if (!page && !drained) {
3308 unreserve_highatomic_pageblock(ac);
3309 drain_all_pages(NULL);
3310 drained = true;
3311 goto retry;
3312 }
3313
3314 return page;
3315}
3316
3317static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3318{
3319 struct zoneref *z;
3320 struct zone *zone;
3321 pg_data_t *last_pgdat = NULL;
3322
3323 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3324 ac->high_zoneidx, ac->nodemask) {
3325 if (last_pgdat != zone->zone_pgdat)
3326 wakeup_kswapd(zone, order, ac->high_zoneidx);
3327 last_pgdat = zone->zone_pgdat;
3328 }
3329}
3330
3331static inline unsigned int
3332gfp_to_alloc_flags(gfp_t gfp_mask)
3333{
3334 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3335
3336
3337 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
3338
3339
3340
3341
3342
3343
3344
3345 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
3346
3347 if (gfp_mask & __GFP_ATOMIC) {
3348
3349
3350
3351
3352 if (!(gfp_mask & __GFP_NOMEMALLOC))
3353 alloc_flags |= ALLOC_HARDER;
3354
3355
3356
3357
3358 alloc_flags &= ~ALLOC_CPUSET;
3359 } else if (unlikely(rt_task(current)) && !in_interrupt())
3360 alloc_flags |= ALLOC_HARDER;
3361
3362#ifdef CONFIG_CMA
3363 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3364 alloc_flags |= ALLOC_CMA;
3365#endif
3366 return alloc_flags;
3367}
3368
3369bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3370{
3371 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3372 return false;
3373
3374 if (gfp_mask & __GFP_MEMALLOC)
3375 return true;
3376 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3377 return true;
3378 if (!in_interrupt() &&
3379 ((current->flags & PF_MEMALLOC) ||
3380 unlikely(test_thread_flag(TIF_MEMDIE))))
3381 return true;
3382
3383 return false;
3384}
3385
3386
3387
3388
3389
3390#define MAX_RECLAIM_RETRIES 16
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
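/*
 * Decide whether another round of reclaim is worthwhile.  Give up after
 * MAX_RECLAIM_RETRIES rounds without progress, or when no zone could satisfy
 * the request even if all its remaining reclaimable pages (scaled down as
 * no_progress_loops grows) were freed; otherwise throttle on writeback if
 * most reclaimable pages are still write-pending, then retry.
 */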
3403static inline bool
3404should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3405 struct alloc_context *ac, int alloc_flags,
3406 bool did_some_progress, int *no_progress_loops)
3407{
3408 struct zone *zone;
3409 struct zoneref *z;
3410
3411
3412
3413
3414
3415
3416 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3417 *no_progress_loops = 0;
3418 else
3419 (*no_progress_loops)++;
3420
3421
3422
3423
3424
3425 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
3426 return false;
3427
3428
3429
3430
3431
3432
3433
3434 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3435 ac->nodemask) {
3436 unsigned long available;
3437 unsigned long reclaimable;
3438
3439 available = reclaimable = zone_reclaimable_pages(zone);
3440 available -= DIV_ROUND_UP((*no_progress_loops) * available,
3441 MAX_RECLAIM_RETRIES);
3442 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3443
3444
3445
3446
3447
3448 if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),
3449 ac_classzone_idx(ac), alloc_flags, available)) {
3450
3451
3452
3453
3454
3455
3456 if (!did_some_progress) {
3457 unsigned long write_pending;
3458
3459 write_pending = zone_page_state_snapshot(zone,
3460 NR_ZONE_WRITE_PENDING);
3461
3462 if (2 * write_pending > reclaimable) {
3463 congestion_wait(BLK_RW_ASYNC, HZ/10);
3464 return true;
3465 }
3466 }
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477 if (current->flags & PF_WQ_WORKER)
3478 schedule_timeout_uninterruptible(1);
3479 else
3480 cond_resched();
3481
3482 return true;
3483 }
3484 }
3485
3486 return false;
3487}
3488
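/*
 * The slow path: the low-watermark fast attempt failed, so wake kswapd,
 * relax the allocation constraints and fall back to compaction, direct
 * reclaim and ultimately the OOM killer, as permitted by gfp_mask.
 */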
3489static inline struct page *
3490__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3491 struct alloc_context *ac)
3492{
3493 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3494 struct page *page = NULL;
3495 unsigned int alloc_flags;
3496 unsigned long did_some_progress;
3497 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
3498 enum compact_result compact_result;
3499 int compaction_retries = 0;
3500 int no_progress_loops = 0;
3501 unsigned long alloc_start = jiffies;
3502 unsigned int stall_timeout = 10 * HZ;
3503
3504
3505
3506
3507
3508
3509
3510 if (order >= MAX_ORDER) {
3511 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
3512 return NULL;
3513 }
3514
3515
3516
3517
3518
3519 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3520 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3521 gfp_mask &= ~__GFP_ATOMIC;
3522
3523
3524
3525
3526
3527
3528 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3529
3530 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3531 wake_all_kswapds(order, ac);
3532
3533
3534
3535
3536
3537 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3538 if (page)
3539 goto got_pg;
3540
3541
3542
3543
3544
3545
3546
3547 if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
3548 !gfp_pfmemalloc_allowed(gfp_mask)) {
3549 page = __alloc_pages_direct_compact(gfp_mask, order,
3550 alloc_flags, ac,
3551 INIT_COMPACT_PRIORITY,
3552 &compact_result);
3553 if (page)
3554 goto got_pg;
3555
3556
3557
3558
3559
3560 if (gfp_mask & __GFP_NORETRY) {
3561
3562
3563
3564
3565
3566
3567
3568
3569 if (compact_result == COMPACT_DEFERRED)
3570 goto nopage;
3571
3572
3573
3574
3575
3576
3577 compact_priority = INIT_COMPACT_PRIORITY;
3578 }
3579 }
3580
3581retry:
3582
3583 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3584 wake_all_kswapds(order, ac);
3585
3586 if (gfp_pfmemalloc_allowed(gfp_mask))
3587 alloc_flags = ALLOC_NO_WATERMARKS;
3588
3589
3590
3591
3592
3593
3594 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
3595 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3596 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3597 ac->high_zoneidx, ac->nodemask);
3598 }
3599
3600
3601 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3602 if (page)
3603 goto got_pg;
3604
3605
3606 if (!can_direct_reclaim) {
3607
3608
3609
3610
3611
3612 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
3613 goto nopage;
3614 }
3615
3616
3617 if (current->flags & PF_MEMALLOC) {
3618
3619
3620
3621
3622
3623 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3624 cond_resched();
3625 goto retry;
3626 }
3627 goto nopage;
3628 }
3629
3630
3631 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3632 goto nopage;
3633
3634
3635
3636 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3637 &did_some_progress);
3638 if (page)
3639 goto got_pg;
3640
3641
3642 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3643 compact_priority, &compact_result);
3644 if (page)
3645 goto got_pg;
3646
3647
3648 if (gfp_mask & __GFP_NORETRY)
3649 goto nopage;
3650
3651
3652
3653
3654
3655 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
3656 goto nopage;
3657
3658
3659 if (time_after(jiffies, alloc_start + stall_timeout)) {
3660 warn_alloc(gfp_mask,
3661 "page allocation stalls for %ums, order:%u",
3662 jiffies_to_msecs(jiffies-alloc_start), order);
3663 stall_timeout += 10 * HZ;
3664 }
3665
3666 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3667 did_some_progress > 0, &no_progress_loops))
3668 goto retry;
3669
3670
3671
3672
3673
3674
3675
3676 if (did_some_progress > 0 &&
3677 should_compact_retry(ac, order, alloc_flags,
3678 compact_result, &compact_priority,
3679 &compaction_retries))
3680 goto retry;
3681
3682
3683 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3684 if (page)
3685 goto got_pg;
3686
3687
3688 if (did_some_progress) {
3689 no_progress_loops = 0;
3690 goto retry;
3691 }
3692
3693nopage:
3694 warn_alloc(gfp_mask,
3695 "page allocation failure: order:%u", order);
3696got_pg:
3697 return page;
3698}
3699
3700
3701
3702
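/*
 * This is the 'heart' of the zoned buddy allocator.
 */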
3703struct page *
3704__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3705 struct zonelist *zonelist, nodemask_t *nodemask)
3706{
3707 struct page *page;
3708 unsigned int cpuset_mems_cookie;
3709 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3710 gfp_t alloc_mask = gfp_mask;
3711 struct alloc_context ac = {
3712 .high_zoneidx = gfp_zone(gfp_mask),
3713 .zonelist = zonelist,
3714 .nodemask = nodemask,
3715 .migratetype = gfpflags_to_migratetype(gfp_mask),
3716 };
3717
3718 if (cpusets_enabled()) {
3719 alloc_mask |= __GFP_HARDWALL;
3720 alloc_flags |= ALLOC_CPUSET;
3721 if (!ac.nodemask)
3722 ac.nodemask = &cpuset_current_mems_allowed;
3723 }
3724
3725 gfp_mask &= gfp_allowed_mask;
3726
3727 lockdep_trace_alloc(gfp_mask);
3728
3729 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
3730
3731 if (should_fail_alloc_page(gfp_mask, order))
3732 return NULL;
3733
3734
3735
3736
3737
3738
3739 if (unlikely(!zonelist->_zonerefs->zone))
3740 return NULL;
3741
3742 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
3743 alloc_flags |= ALLOC_CMA;
3744
3745retry_cpuset:
3746 cpuset_mems_cookie = read_mems_allowed_begin();
3747
3748
3749 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3750
3751
3752
3753
3754
3755
3756 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3757 ac.high_zoneidx, ac.nodemask);
3758 if (!ac.preferred_zoneref) {
3759 page = NULL;
3760 goto no_zone;
3761 }
3762
3763
3764 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
3765 if (likely(page))
3766 goto out;
3767
3768
3769
3770
3771
3772 alloc_mask = memalloc_noio_flags(gfp_mask);
3773 ac.spread_dirty_pages = false;
3774
3775
3776
3777
3778
3779 if (cpusets_enabled())
3780 ac.nodemask = nodemask;
3781 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3782
3783no_zone:
3784
3785
3786
3787
3788
3789
3790 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
3791 alloc_mask = gfp_mask;
3792 goto retry_cpuset;
3793 }
3794
3795out:
3796 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
3797 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
3798 __free_pages(page, order);
3799 page = NULL;
3800 }
3801
3802 if (kmemcheck_enabled && page)
3803 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3804
3805 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
3806
3807 return page;
3808}
3809EXPORT_SYMBOL(__alloc_pages_nodemask);
3810
3811
3812
3813
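/*
 * Common helper functions.
 */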
3814unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
3815{
3816 struct page *page;
3817
3818
3819
3820
3821
3822 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3823
3824 page = alloc_pages(gfp_mask, order);
3825 if (!page)
3826 return 0;
3827 return (unsigned long) page_address(page);
3828}
3829EXPORT_SYMBOL(__get_free_pages);
3830
3831unsigned long get_zeroed_page(gfp_t gfp_mask)
3832{
3833 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
3834}
3835EXPORT_SYMBOL(get_zeroed_page);
3836
3837void __free_pages(struct page *page, unsigned int order)
3838{
3839 if (put_page_testzero(page)) {
3840 if (order == 0)
3841 free_hot_cold_page(page, false);
3842 else
3843 __free_pages_ok(page, order);
3844 }
3845}
3846
3847EXPORT_SYMBOL(__free_pages);
3848
3849void free_pages(unsigned long addr, unsigned int order)
3850{
3851 if (addr != 0) {
3852 VM_BUG_ON(!virt_addr_valid((void *)addr));
3853 __free_pages(virt_to_page((void *)addr), order);
3854 }
3855}
3856
3857EXPORT_SYMBOL(free_pages);
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
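/*
 * Page fragment allocator: small sub-page fragments are carved out of a
 * cached higher-order page.  The page's reference count is inflated up
 * front (pagecnt_bias) so each fragment can be released with a plain put,
 * and the backing page is recycled or freed only once every outstanding
 * fragment has been dropped.
 */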
3870static struct page *__page_frag_refill(struct page_frag_cache *nc,
3871 gfp_t gfp_mask)
3872{
3873 struct page *page = NULL;
3874 gfp_t gfp = gfp_mask;
3875
3876#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3877 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3878 __GFP_NOMEMALLOC;
3879 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3880 PAGE_FRAG_CACHE_MAX_ORDER);
3881 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3882#endif
3883 if (unlikely(!page))
3884 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3885
3886 nc->va = page ? page_address(page) : NULL;
3887
3888 return page;
3889}
3890
3891void *__alloc_page_frag(struct page_frag_cache *nc,
3892 unsigned int fragsz, gfp_t gfp_mask)
3893{
3894 unsigned int size = PAGE_SIZE;
3895 struct page *page;
3896 int offset;
3897
3898 if (unlikely(!nc->va)) {
3899refill:
3900 page = __page_frag_refill(nc, gfp_mask);
3901 if (!page)
3902 return NULL;
3903
3904#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3905
3906 size = nc->size;
3907#endif
3908
3909
3910
3911 page_ref_add(page, size - 1);
3912
3913
3914 nc->pfmemalloc = page_is_pfmemalloc(page);
3915 nc->pagecnt_bias = size;
3916 nc->offset = size;
3917 }
3918
3919 offset = nc->offset - fragsz;
3920 if (unlikely(offset < 0)) {
3921 page = virt_to_page(nc->va);
3922
3923 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
3924 goto refill;
3925
3926#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3927
3928 size = nc->size;
3929#endif
3930
3931 set_page_count(page, size);
3932
3933
3934 nc->pagecnt_bias = size;
3935 offset = size - fragsz;
3936 }
3937
3938 nc->pagecnt_bias--;
3939 nc->offset = offset;
3940
3941 return nc->va + offset;
3942}
3943EXPORT_SYMBOL(__alloc_page_frag);
3944
3945
3946
3947
3948void __free_page_frag(void *addr)
3949{
3950 struct page *page = virt_to_head_page(addr);
3951
3952 if (unlikely(put_page_testzero(page)))
3953 __free_pages_ok(page, compound_order(page));
3954}
3955EXPORT_SYMBOL(__free_page_frag);
3956
3957static void *make_alloc_exact(unsigned long addr, unsigned int order,
3958 size_t size)
3959{
3960 if (addr) {
3961 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3962 unsigned long used = addr + PAGE_ALIGN(size);
3963
3964 split_page(virt_to_page((void *)addr), order);
3965 while (used < alloc_end) {
3966 free_page(used);
3967 used += PAGE_SIZE;
3968 }
3969 }
3970 return (void *)addr;
3971}
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
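/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous
 *		       pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like __get_free_pages(), but the pages beyond the requested size in the
 * rounded-up power-of-two allocation are split off and freed immediately.
 * Memory allocated by this function must be released by free_pages_exact().
 */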
3986void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3987{
3988 unsigned int order = get_order(size);
3989 unsigned long addr;
3990
3991 addr = __get_free_pages(gfp_mask, order);
3992 return make_alloc_exact(addr, order, size);
3993}
3994EXPORT_SYMBOL(alloc_pages_exact);
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4007{
4008 unsigned int order = get_order(size);
4009 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4010 if (!p)
4011 return NULL;
4012 return make_alloc_exact((unsigned long)page_address(p), order, size);
4013}
4014
4015
4016
4017
4018
4019
4020
4021
4022void free_pages_exact(void *virt, size_t size)
4023{
4024 unsigned long addr = (unsigned long)virt;
4025 unsigned long end = addr + PAGE_ALIGN(size);
4026
4027 while (addr < end) {
4028 free_page(addr);
4029 addr += PAGE_SIZE;
4030 }
4031}
4032EXPORT_SYMBOL(free_pages_exact);
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
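/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * Sums the pages in each zone of the current node's zonelist, up to the
 * given zone index, that lie beyond the zone's high watermark.
 */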
4043static unsigned long nr_free_zone_pages(int offset)
4044{
4045 struct zoneref *z;
4046 struct zone *zone;
4047
4048
4049 unsigned long sum = 0;
4050
4051 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4052
4053 for_each_zone_zonelist(zone, z, zonelist, offset) {
4054 unsigned long size = zone->managed_pages;
4055 unsigned long high = high_wmark_pages(zone);
4056 if (size > high)
4057 sum += size - high;
4058 }
4059
4060 return sum;
4061}
4062
4063
4064
4065
4066
4067
4068
4069unsigned long nr_free_buffer_pages(void)
4070{
4071 return nr_free_zone_pages(gfp_zone(GFP_USER));
4072}
4073EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4074
4075
4076
4077
4078
4079
4080
4081unsigned long nr_free_pagecache_pages(void)
4082{
4083 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4084}
4085
4086static inline void show_node(struct zone *zone)
4087{
4088 if (IS_ENABLED(CONFIG_NUMA))
4089 printk("Node %d ", zone_to_nid(zone));
4090}
4091
4092long si_mem_available(void)
4093{
4094 long available;
4095 unsigned long pagecache;
4096 unsigned long wmark_low = 0;
4097 unsigned long pages[NR_LRU_LISTS];
4098 struct zone *zone;
4099 int lru;
4100
4101 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4102 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4103
4104 for_each_zone(zone)
4105 wmark_low += zone->watermark[WMARK_LOW];
4106
4107
4108
4109
4110
4111 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4112
4113
4114
4115
4116
4117
4118 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4119 pagecache -= min(pagecache / 2, wmark_low);
4120 available += pagecache;
4121
4122
4123
4124
4125
4126 available += global_page_state(NR_SLAB_RECLAIMABLE) -
4127 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
4128
4129 if (available < 0)
4130 available = 0;
4131 return available;
4132}
4133EXPORT_SYMBOL_GPL(si_mem_available);
4134
4135void si_meminfo(struct sysinfo *val)
4136{
4137 val->totalram = totalram_pages;
4138 val->sharedram = global_node_page_state(NR_SHMEM);
4139 val->freeram = global_page_state(NR_FREE_PAGES);
4140 val->bufferram = nr_blockdev_pages();
4141 val->totalhigh = totalhigh_pages;
4142 val->freehigh = nr_free_highpages();
4143 val->mem_unit = PAGE_SIZE;
4144}
4145
4146EXPORT_SYMBOL(si_meminfo);
4147
4148#ifdef CONFIG_NUMA
4149void si_meminfo_node(struct sysinfo *val, int nid)
4150{
4151 int zone_type;
4152 unsigned long managed_pages = 0;
4153 unsigned long managed_highpages = 0;
4154 unsigned long free_highpages = 0;
4155 pg_data_t *pgdat = NODE_DATA(nid);
4156
4157 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4158 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4159 val->totalram = managed_pages;
4160 val->sharedram = node_page_state(pgdat, NR_SHMEM);
4161 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4162#ifdef CONFIG_HIGHMEM
4163 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4164 struct zone *zone = &pgdat->node_zones[zone_type];
4165
4166 if (is_highmem(zone)) {
4167 managed_highpages += zone->managed_pages;
4168 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4169 }
4170 }
4171 val->totalhigh = managed_highpages;
4172 val->freehigh = free_highpages;
4173#else
4174 val->totalhigh = managed_highpages;
4175 val->freehigh = free_highpages;
4176#endif
4177 val->mem_unit = PAGE_SIZE;
4178}
4179#endif
4180
4181
4182
4183
4184
4185bool skip_free_areas_node(unsigned int flags, int nid)
4186{
4187 bool ret = false;
4188 unsigned int cpuset_mems_cookie;
4189
4190 if (!(flags & SHOW_MEM_FILTER_NODES))
4191 goto out;
4192
4193 do {
4194 cpuset_mems_cookie = read_mems_allowed_begin();
4195 ret = !node_isset(nid, cpuset_current_mems_allowed);
4196 } while (read_mems_allowed_retry(cpuset_mems_cookie));
4197out:
4198 return ret;
4199}
4200
4201#define K(x) ((x) << (PAGE_SHIFT-10))
4202
4203static void show_migration_types(unsigned char type)
4204{
4205 static const char types[MIGRATE_TYPES] = {
4206 [MIGRATE_UNMOVABLE] = 'U',
4207 [MIGRATE_MOVABLE] = 'M',
4208 [MIGRATE_RECLAIMABLE] = 'E',
4209 [MIGRATE_HIGHATOMIC] = 'H',
4210#ifdef CONFIG_CMA
4211 [MIGRATE_CMA] = 'C',
4212#endif
4213#ifdef CONFIG_MEMORY_ISOLATION
4214 [MIGRATE_ISOLATE] = 'I',
4215#endif
4216 };
4217 char tmp[MIGRATE_TYPES + 1];
4218 char *p = tmp;
4219 int i;
4220
4221 for (i = 0; i < MIGRATE_TYPES; i++) {
4222 if (type & (1 << i))
4223 *p++ = types[i];
4224 }
4225
4226 *p = '\0';
4227 printk(KERN_CONT "(%s) ", tmp);
4228}
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
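/*
 * Show free areas: global and per-node vmstat counters, per-zone watermarks
 * and counters, and the per-order buddy free lists.  Nodes not allowed by
 * current's cpuset are suppressed when SHOW_MEM_FILTER_NODES is set in
 * 'filter'.
 */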
4239void show_free_areas(unsigned int filter)
4240{
4241 unsigned long free_pcp = 0;
4242 int cpu;
4243 struct zone *zone;
4244 pg_data_t *pgdat;
4245
4246 for_each_populated_zone(zone) {
4247 if (skip_free_areas_node(filter, zone_to_nid(zone)))
4248 continue;
4249
4250 for_each_online_cpu(cpu)
4251 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4252 }
4253
4254 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4255 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
4256 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4257 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4258 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4259 " free:%lu free_pcp:%lu free_cma:%lu\n",
4260 global_node_page_state(NR_ACTIVE_ANON),
4261 global_node_page_state(NR_INACTIVE_ANON),
4262 global_node_page_state(NR_ISOLATED_ANON),
4263 global_node_page_state(NR_ACTIVE_FILE),
4264 global_node_page_state(NR_INACTIVE_FILE),
4265 global_node_page_state(NR_ISOLATED_FILE),
4266 global_node_page_state(NR_UNEVICTABLE),
4267 global_node_page_state(NR_FILE_DIRTY),
4268 global_node_page_state(NR_WRITEBACK),
4269 global_node_page_state(NR_UNSTABLE_NFS),
4270 global_page_state(NR_SLAB_RECLAIMABLE),
4271 global_page_state(NR_SLAB_UNRECLAIMABLE),
4272 global_node_page_state(NR_FILE_MAPPED),
4273 global_node_page_state(NR_SHMEM),
4274 global_page_state(NR_PAGETABLE),
4275 global_page_state(NR_BOUNCE),
4276 global_page_state(NR_FREE_PAGES),
4277 free_pcp,
4278 global_page_state(NR_FREE_CMA_PAGES));
4279
4280 for_each_online_pgdat(pgdat) {
4281 printk("Node %d"
4282 " active_anon:%lukB"
4283 " inactive_anon:%lukB"
4284 " active_file:%lukB"
4285 " inactive_file:%lukB"
4286 " unevictable:%lukB"
4287 " isolated(anon):%lukB"
4288 " isolated(file):%lukB"
4289 " mapped:%lukB"
4290 " dirty:%lukB"
4291 " writeback:%lukB"
4292 " shmem:%lukB"
4293#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4294 " shmem_thp: %lukB"
4295 " shmem_pmdmapped: %lukB"
4296 " anon_thp: %lukB"
4297#endif
4298 " writeback_tmp:%lukB"
4299 " unstable:%lukB"
4300 " pages_scanned:%lu"
4301 " all_unreclaimable? %s"
4302 "\n",
4303 pgdat->node_id,
4304 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4305 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4306 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4307 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4308 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4309 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4310 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
4311 K(node_page_state(pgdat, NR_FILE_MAPPED)),
4312 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4313 K(node_page_state(pgdat, NR_WRITEBACK)),
4314#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4315 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4316 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4317 * HPAGE_PMD_NR),
4318 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4319#endif
4320 K(node_page_state(pgdat, NR_SHMEM)),
4321 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4322 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4323 node_page_state(pgdat, NR_PAGES_SCANNED),
4324 !pgdat_reclaimable(pgdat) ? "yes" : "no");
4325 }
4326
4327 for_each_populated_zone(zone) {
4328 int i;
4329
4330 if (skip_free_areas_node(filter, zone_to_nid(zone)))
4331 continue;
4332
4333 free_pcp = 0;
4334 for_each_online_cpu(cpu)
4335 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4336
4337 show_node(zone);
4338 printk(KERN_CONT
4339 "%s"
4340 " free:%lukB"
4341 " min:%lukB"
4342 " low:%lukB"
4343 " high:%lukB"
4344 " active_anon:%lukB"
4345 " inactive_anon:%lukB"
4346 " active_file:%lukB"
4347 " inactive_file:%lukB"
4348 " unevictable:%lukB"
4349 " writepending:%lukB"
4350 " present:%lukB"
4351 " managed:%lukB"
4352 " mlocked:%lukB"
4353 " slab_reclaimable:%lukB"
4354 " slab_unreclaimable:%lukB"
4355 " kernel_stack:%lukB"
4356 " pagetables:%lukB"
4357 " bounce:%lukB"
4358 " free_pcp:%lukB"
4359 " local_pcp:%ukB"
4360 " free_cma:%lukB"
4361 "\n",
4362 zone->name,
4363 K(zone_page_state(zone, NR_FREE_PAGES)),
4364 K(min_wmark_pages(zone)),
4365 K(low_wmark_pages(zone)),
4366 K(high_wmark_pages(zone)),
4367 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4368 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4369 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4370 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4371 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
4372 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
4373 K(zone->present_pages),
4374 K(zone->managed_pages),
4375 K(zone_page_state(zone, NR_MLOCK)),
4376 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
4377 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
4378 zone_page_state(zone, NR_KERNEL_STACK_KB),
4379 K(zone_page_state(zone, NR_PAGETABLE)),
4380 K(zone_page_state(zone, NR_BOUNCE)),
4381 K(free_pcp),
4382 K(this_cpu_read(zone->pageset->pcp.count)),
4383 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
4384 printk("lowmem_reserve[]:");
4385 for (i = 0; i < MAX_NR_ZONES; i++)
4386 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4387 printk(KERN_CONT "\n");
4388 }
4389
4390 for_each_populated_zone(zone) {
4391 unsigned int order;
4392 unsigned long nr[MAX_ORDER], flags, total = 0;
4393 unsigned char types[MAX_ORDER];
4394
4395 if (skip_free_areas_node(filter, zone_to_nid(zone)))
4396 continue;
4397 show_node(zone);
4398 printk(KERN_CONT "%s: ", zone->name);
4399
4400 spin_lock_irqsave(&zone->lock, flags);
4401 for (order = 0; order < MAX_ORDER; order++) {
4402 struct free_area *area = &zone->free_area[order];
4403 int type;
4404
4405 nr[order] = area->nr_free;
4406 total += nr[order] << order;
4407
4408 types[order] = 0;
4409 for (type = 0; type < MIGRATE_TYPES; type++) {
4410 if (!list_empty(&area->free_list[type]))
4411 types[order] |= 1 << type;
4412 }
4413 }
4414 spin_unlock_irqrestore(&zone->lock, flags);
4415 for (order = 0; order < MAX_ORDER; order++) {
4416 printk(KERN_CONT "%lu*%lukB ",
4417 nr[order], K(1UL) << order);
4418 if (nr[order])
4419 show_migration_types(types[order]);
4420 }
4421 printk(KERN_CONT "= %lukB\n", K(total));
4422 }
4423
4424 hugetlb_show_meminfo();
4425
4426 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
4427
4428 show_swap_cache_info();
4429}
4430
4431static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4432{
4433 zoneref->zone = zone;
4434 zoneref->zone_idx = zone_idx(zone);
4435}
4436
4437
4438
4439
4440
4441
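/*
 * Add all zones of a node that have managed pages to the zonelist, highest
 * zone first; returns the updated number of zonelist entries.
 */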
4442static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
4443 int nr_zones)
4444{
4445 struct zone *zone;
4446 enum zone_type zone_type = MAX_NR_ZONES;
4447
4448 do {
4449 zone_type--;
4450 zone = pgdat->node_zones + zone_type;
4451 if (managed_zone(zone)) {
4452 zoneref_set_zone(zone,
4453 &zonelist->_zonerefs[nr_zones++]);
4454 check_highest_zone(zone_type);
4455 }
4456 } while (zone_type);
4457
4458 return nr_zones;
4459}
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
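/*
 * zonelist_order:
 *  0 = automatic detection of better ordering
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 * On non-NUMA machines both orderings produce the same zonelist, so this is
 * only configurable on NUMA.
 */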
4471#define ZONELIST_ORDER_DEFAULT 0
4472#define ZONELIST_ORDER_NODE 1
4473#define ZONELIST_ORDER_ZONE 2
4474
4475
4476
4477
4478static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4479static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4480
4481
4482#ifdef CONFIG_NUMA
4483
4484static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4485
4486#define NUMA_ZONELIST_ORDER_LEN 16
4487char numa_zonelist_order[16] = "default";
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497static int __parse_numa_zonelist_order(char *s)
4498{
4499 if (*s == 'd' || *s == 'D') {
4500 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4501 } else if (*s == 'n' || *s == 'N') {
4502 user_zonelist_order = ZONELIST_ORDER_NODE;
4503 } else if (*s == 'z' || *s == 'Z') {
4504 user_zonelist_order = ZONELIST_ORDER_ZONE;
4505 } else {
4506 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
4507 return -EINVAL;
4508 }
4509 return 0;
4510}
4511
4512static __init int setup_numa_zonelist_order(char *s)
4513{
4514 int ret;
4515
4516 if (!s)
4517 return 0;
4518
4519 ret = __parse_numa_zonelist_order(s);
4520 if (ret == 0)
4521 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4522
4523 return ret;
4524}
4525early_param("numa_zonelist_order", setup_numa_zonelist_order);
4526
4527
4528
4529
4530int numa_zonelist_order_handler(struct ctl_table *table, int write,
4531 void __user *buffer, size_t *length,
4532 loff_t *ppos)
4533{
4534 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4535 int ret;
4536 static DEFINE_MUTEX(zl_order_mutex);
4537
4538 mutex_lock(&zl_order_mutex);
4539 if (write) {
4540 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4541 ret = -EINVAL;
4542 goto out;
4543 }
4544 strcpy(saved_string, (char *)table->data);
4545 }
4546 ret = proc_dostring(table, write, buffer, length, ppos);
4547 if (ret)
4548 goto out;
4549 if (write) {
4550 int oldval = user_zonelist_order;
4551
4552 ret = __parse_numa_zonelist_order((char *)table->data);
4553 if (ret) {
4554
4555
4556
4557 strncpy((char *)table->data, saved_string,
4558 NUMA_ZONELIST_ORDER_LEN);
4559 user_zonelist_order = oldval;
4560 } else if (oldval != user_zonelist_order) {
4561 mutex_lock(&zonelists_mutex);
4562 build_all_zonelists(NULL, NULL);
4563 mutex_unlock(&zonelists_mutex);
4564 }
4565 }
4566out:
4567 mutex_unlock(&zl_order_mutex);
4568 return ret;
4569}
4570
4571
4572#define MAX_NODE_LOAD (nr_online_nodes)
4573static int node_load[MAX_NUMNODES];
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
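/**
 * find_next_best_node - find the next node that should appear in a given
 *			 node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * Prefer nodes that are close by NUMA distance and not yet on the list,
 * with a small penalty for nodes that have CPUs and for heavily "loaded"
 * nodes so fallback traffic spreads out.  Returns the chosen node, or
 * NUMA_NO_NODE when every memory node has been used.
 */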
4589static int find_next_best_node(int node, nodemask_t *used_node_mask)
4590{
4591 int n, val;
4592 int min_val = INT_MAX;
4593 int best_node = NUMA_NO_NODE;
4594 const struct cpumask *tmp = cpumask_of_node(0);
4595
4596
4597 if (!node_isset(node, *used_node_mask)) {
4598 node_set(node, *used_node_mask);
4599 return node;
4600 }
4601
4602 for_each_node_state(n, N_MEMORY) {
4603
4604
4605 if (node_isset(n, *used_node_mask))
4606 continue;
4607
4608
4609 val = node_distance(node, n);
4610
4611
4612 val += (n < node);
4613
4614
4615 tmp = cpumask_of_node(n);
4616 if (!cpumask_empty(tmp))
4617 val += PENALTY_FOR_NODE_WITH_CPUS;
4618
4619
4620 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4621 val += node_load[n];
4622
4623 if (val < min_val) {
4624 min_val = val;
4625 best_node = n;
4626 }
4627 }
4628
4629 if (best_node >= 0)
4630 node_set(best_node, *used_node_mask);
4631
4632 return best_node;
4633}
4634
4635
4636
4637
4638
4639
4640
4641static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4642{
4643 int j;
4644 struct zonelist *zonelist;
4645
4646 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4647 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
4648 ;
4649 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4650 zonelist->_zonerefs[j].zone = NULL;
4651 zonelist->_zonerefs[j].zone_idx = 0;
4652}
4653
4654
4655
4656
4657static void build_thisnode_zonelists(pg_data_t *pgdat)
4658{
4659 int j;
4660 struct zonelist *zonelist;
4661
4662 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
4663 j = build_zonelists_node(pgdat, zonelist, 0);
4664 zonelist->_zonerefs[j].zone = NULL;
4665 zonelist->_zonerefs[j].zone_idx = 0;
4666}
4667
4668
4669
4670
4671
4672
4673
4674static int node_order[MAX_NUMNODES];
4675
4676static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4677{
4678 int pos, j, node;
4679 int zone_type;
4680 struct zone *z;
4681 struct zonelist *zonelist;
4682
4683 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4684 pos = 0;
4685 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4686 for (j = 0; j < nr_nodes; j++) {
4687 node = node_order[j];
4688 z = &NODE_DATA(node)->node_zones[zone_type];
4689 if (managed_zone(z)) {
4690 zoneref_set_zone(z,
4691 &zonelist->_zonerefs[pos++]);
4692 check_highest_zone(zone_type);
4693 }
4694 }
4695 }
4696 zonelist->_zonerefs[pos].zone = NULL;
4697 zonelist->_zonerefs[pos].zone_idx = 0;
4698}
4699
4700#if defined(CONFIG_64BIT)
4701
4702
4703
4704
4705
4706static int default_zonelist_order(void)
4707{
4708 return ZONELIST_ORDER_NODE;
4709}
4710#else
4711
4712
4713
4714
4715
4716
4717
4718
4719static int default_zonelist_order(void)
4720{
4721 return ZONELIST_ORDER_ZONE;
4722}
4723#endif
4724
4725static void set_zonelist_order(void)
4726{
4727 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4728 current_zonelist_order = default_zonelist_order();
4729 else
4730 current_zonelist_order = user_zonelist_order;
4731}
4732
4733static void build_zonelists(pg_data_t *pgdat)
4734{
4735 int i, node, load;
4736 nodemask_t used_mask;
4737 int local_node, prev_node;
4738 struct zonelist *zonelist;
4739 unsigned int order = current_zonelist_order;
4740
4741
4742 for (i = 0; i < MAX_ZONELISTS; i++) {
4743 zonelist = pgdat->node_zonelists + i;
4744 zonelist->_zonerefs[0].zone = NULL;
4745 zonelist->_zonerefs[0].zone_idx = 0;
4746 }
4747
4748
4749 local_node = pgdat->node_id;
4750 load = nr_online_nodes;
4751 prev_node = local_node;
4752 nodes_clear(used_mask);
4753
4754 memset(node_order, 0, sizeof(node_order));
4755 i = 0;
4756
4757 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4758
4759
4760
4761
4762
4763 if (node_distance(local_node, node) !=
4764 node_distance(local_node, prev_node))
4765 node_load[node] = load;
4766
4767 prev_node = node;
4768 load--;
4769 if (order == ZONELIST_ORDER_NODE)
4770 build_zonelists_in_node_order(pgdat, node);
4771 else
4772 node_order[i++] = node;
4773 }
4774
4775 if (order == ZONELIST_ORDER_ZONE) {
4776
4777 build_zonelists_in_zone_order(pgdat, i);
4778 }
4779
4780 build_thisnode_zonelists(pgdat);
4781}
4782
4783#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4784
4785
4786
4787
4788
4789
4790int local_memory_node(int node)
4791{
4792 struct zoneref *z;
4793
4794 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4795 gfp_zone(GFP_KERNEL),
4796 NULL);
4797 return z->zone->node;
4798}
4799#endif
4800
4801static void setup_min_unmapped_ratio(void);
4802static void setup_min_slab_ratio(void);
4803#else
4804
4805static void set_zonelist_order(void)
4806{
4807 current_zonelist_order = ZONELIST_ORDER_ZONE;
4808}
4809
4810static void build_zonelists(pg_data_t *pgdat)
4811{
4812 int node, local_node;
4813 enum zone_type j;
4814 struct zonelist *zonelist;
4815
4816 local_node = pgdat->node_id;
4817
4818 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4819 j = build_zonelists_node(pgdat, zonelist, 0);
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4830 if (!node_online(node))
4831 continue;
4832 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4833 }
4834 for (node = 0; node < local_node; node++) {
4835 if (!node_online(node))
4836 continue;
4837 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4838 }
4839
4840 zonelist->_zonerefs[j].zone = NULL;
4841 zonelist->_zonerefs[j].zone_idx = 0;
4842}
4843
4844#endif
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4862static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
4863static void setup_zone_pageset(struct zone *zone);
4864
4865
4866
4867
4868
4869DEFINE_MUTEX(zonelists_mutex);
4870
4871
4872static int __build_all_zonelists(void *data)
4873{
4874 int nid;
4875 int cpu;
4876 pg_data_t *self = data;
4877
4878#ifdef CONFIG_NUMA
4879 memset(node_load, 0, sizeof(node_load));
4880#endif
4881
4882 if (self && !node_online(self->node_id)) {
4883 build_zonelists(self);
4884 }
4885
4886 for_each_online_node(nid) {
4887 pg_data_t *pgdat = NODE_DATA(nid);
4888
4889 build_zonelists(pgdat);
4890 }
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905 for_each_possible_cpu(cpu) {
4906 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4907
4908#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4909
4910
4911
4912
4913
4914
4915
4916
4917 if (cpu_online(cpu))
4918 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4919#endif
4920 }
4921
4922 return 0;
4923}
4924
4925static noinline void __init
4926build_all_zonelists_init(void)
4927{
4928 __build_all_zonelists(NULL);
4929 mminit_verify_zonelist();
4930 cpuset_init_current_mems_allowed();
4931}
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
4943{
4944 set_zonelist_order();
4945
4946 if (system_state == SYSTEM_BOOTING) {
4947 build_all_zonelists_init();
4948 } else {
4949#ifdef CONFIG_MEMORY_HOTPLUG
4950 if (zone)
4951 setup_zone_pageset(zone);
4952#endif
4953
4954
4955 stop_machine(__build_all_zonelists, pgdat, NULL);
4956
4957 }
4958 vm_total_pages = nr_free_pagecache_pages();
4959
4960
4961
4962
4963
4964
4965
4966 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
4967 page_group_by_mobility_disabled = 1;
4968 else
4969 page_group_by_mobility_disabled = 0;
4970
4971 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
4972 nr_online_nodes,
4973 zonelist_order_name[current_zonelist_order],
4974 page_group_by_mobility_disabled ? "off" : "on",
4975 vm_total_pages);
4976#ifdef CONFIG_NUMA
4977 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
4978#endif
4979}
4980
4981
4982
4983
4984
4985
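/*
 * Set up the struct pages for the [start_pfn, start_pfn + size) range of a
 * zone.  Initially all pages are reserved; free ones are released by the
 * early boot code once initialisation is done.  The first pfn of every
 * pageblock is marked MIGRATE_MOVABLE.
 */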
4986void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4987 unsigned long start_pfn, enum memmap_context context)
4988{
4989 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
4990 unsigned long end_pfn = start_pfn + size;
4991 pg_data_t *pgdat = NODE_DATA(nid);
4992 unsigned long pfn;
4993 unsigned long nr_initialised = 0;
4994#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4995 struct memblock_region *r = NULL, *tmp;
4996#endif
4997
4998 if (highest_memmap_pfn < end_pfn - 1)
4999 highest_memmap_pfn = end_pfn - 1;
5000
5001
5002
5003
5004
5005 if (altmap && start_pfn == altmap->base_pfn)
5006 start_pfn += altmap->reserve;
5007
5008 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5009
5010
5011
5012
5013 if (context != MEMMAP_EARLY)
5014 goto not_early;
5015
5016 if (!early_pfn_valid(pfn))
5017 continue;
5018 if (!early_pfn_in_nid(pfn, nid))
5019 continue;
5020 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5021 break;
5022
5023#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5024
5025
5026
5027
5028
5029 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5030 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5031 for_each_memblock(memory, tmp)
5032 if (pfn < memblock_region_memory_end_pfn(tmp))
5033 break;
5034 r = tmp;
5035 }
5036 if (pfn >= memblock_region_memory_base_pfn(r) &&
5037 memblock_is_mirror(r)) {
5038
5039 pfn = memblock_region_memory_end_pfn(r);
5040 continue;
5041 }
5042 }
5043#endif
5044
5045not_early:
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058 if (!(pfn & (pageblock_nr_pages - 1))) {
5059 struct page *page = pfn_to_page(pfn);
5060
5061 __init_single_page(page, pfn, zone, nid);
5062 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5063 } else {
5064 __init_single_pfn(pfn, zone, nid);
5065 }
5066 }
5067}
5068
5069static void __meminit zone_init_free_lists(struct zone *zone)
5070{
5071 unsigned int order, t;
5072 for_each_migratetype_order(order, t) {
5073 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5074 zone->free_area[order].nr_free = 0;
5075 }
5076}
5077
5078#ifndef __HAVE_ARCH_MEMMAP_INIT
5079#define memmap_init(size, nid, zone, start_pfn) \
5080 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
5081#endif
5082
5083static int zone_batchsize(struct zone *zone)
5084{
5085#ifdef CONFIG_MMU
5086 int batch;
5087
5088
5089
5090
5091
5092
5093
5094 batch = zone->managed_pages / 1024;
5095 if (batch * PAGE_SIZE > 512 * 1024)
5096 batch = (512 * 1024) / PAGE_SIZE;
5097 batch /= 4;
5098 if (batch < 1)
5099 batch = 1;
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5112
5113 return batch;
5114
5115#else
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129 return 0;
5130#endif
5131}
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
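/*
 * pageset_update() - change the per-cpu pageset high and batch limits.
 *
 * ->batch must never exceed ->high, and readers are not locked out, so the
 * fields are updated in a safe order: batch is first dropped to 1, then the
 * new high is published, then the final batch value.  Callers must serialise
 * against each other (pcp_batch_high_lock).
 */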
5146static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5147 unsigned long batch)
5148{
5149
5150 pcp->batch = 1;
5151 smp_wmb();
5152
5153
5154 pcp->high = high;
5155 smp_wmb();
5156
5157 pcp->batch = batch;
5158}
5159
5160
5161static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5162{
5163 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5164}
5165
5166static void pageset_init(struct per_cpu_pageset *p)
5167{
5168 struct per_cpu_pages *pcp;
5169 int migratetype;
5170
5171 memset(p, 0, sizeof(*p));
5172
5173 pcp = &p->pcp;
5174 pcp->count = 0;
5175 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5176 INIT_LIST_HEAD(&pcp->lists[migratetype]);
5177}
5178
5179static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5180{
5181 pageset_init(p);
5182 pageset_set_batch(p, batch);
5183}
5184
5185
5186
5187
5188
5189static void pageset_set_high(struct per_cpu_pageset *p,
5190 unsigned long high)
5191{
5192 unsigned long batch = max(1UL, high / 4);
5193 if ((high / 4) > (PAGE_SHIFT * 8))
5194 batch = PAGE_SHIFT * 8;
5195
5196 pageset_update(&p->pcp, high, batch);
5197}
5198
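/*
 * Pick high/batch for a zone's pageset: if the percpu_pagelist_fraction
 * sysctl is set, size the list as that fraction of the zone's managed
 * pages; otherwise fall back to the heuristic in zone_batchsize().
 */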
5199static void pageset_set_high_and_batch(struct zone *zone,
5200 struct per_cpu_pageset *pcp)
5201{
5202 if (percpu_pagelist_fraction)
5203 pageset_set_high(pcp,
5204 (zone->managed_pages /
5205 percpu_pagelist_fraction));
5206 else
5207 pageset_set_batch(pcp, zone_batchsize(zone));
5208}
5209
5210static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5211{
5212 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5213
5214 pageset_init(pcp);
5215 pageset_set_high_and_batch(zone, pcp);
5216}
5217
5218static void __meminit setup_zone_pageset(struct zone *zone)
5219{
5220 int cpu;
5221 zone->pageset = alloc_percpu(struct per_cpu_pageset);
5222 for_each_possible_cpu(cpu)
5223 zone_pageset_init(zone, cpu);
5224}
5225
/*
 * Allocate per-cpu pagesets (and per-node stats) and initialize them.
 * Before this call only the boot pagesets are available.
 */
5230void __init setup_per_cpu_pageset(void)
5231{
5232 struct pglist_data *pgdat;
5233 struct zone *zone;
5234
5235 for_each_populated_zone(zone)
5236 setup_zone_pageset(zone);
5237
5238 for_each_online_pgdat(pgdat)
5239 pgdat->per_cpu_nodestats =
5240 alloc_percpu(struct per_cpu_nodestat);
5241}
5242
5243static __meminit void zone_pcp_init(struct zone *zone)
5244{
	/*
	 * Per-cpu pagesets cannot be allocated this early in boot, so
	 * point the zone at the static boot_pageset; it is replaced by
	 * real per-cpu pagesets in setup_per_cpu_pageset() later on.
	 */
5250 zone->pageset = &boot_pageset;
5251
5252 if (populated_zone(zone))
5253 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5254 zone->name, zone->present_pages,
5255 zone_batchsize(zone));
5256}
5257
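/*
 * Record the start pfn of an (initially empty) zone, bump the node's zone
 * count and initialise the zone's free lists.  The zone is marked
 * initialized so that later code knows it is usable.
 */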
5258int __meminit init_currently_empty_zone(struct zone *zone,
5259 unsigned long zone_start_pfn,
5260 unsigned long size)
5261{
5262 struct pglist_data *pgdat = zone->zone_pgdat;
5263
5264 pgdat->nr_zones = zone_idx(zone) + 1;
5265
5266 zone->zone_start_pfn = zone_start_pfn;
5267
5268 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5269 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5270 pgdat->node_id,
5271 (unsigned long)zone_idx(zone),
5272 zone_start_pfn, (zone_start_pfn + size));
5273
5274 zone_init_free_lists(zone);
5275 zone->initialized = 1;
5276
5277 return 0;
5278}
5279
5280#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5281#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
5282
5283
5284
5285
5286int __meminit __early_pfn_to_nid(unsigned long pfn,
5287 struct mminit_pfnnid_cache *state)
5288{
5289 unsigned long start_pfn, end_pfn;
5290 int nid;
5291
5292 if (state->last_start <= pfn && pfn < state->last_end)
5293 return state->last_nid;
5294
5295 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5296 if (nid != -1) {
5297 state->last_start = start_pfn;
5298 state->last_end = end_pfn;
5299 state->last_nid = nid;
5300 }
5301
5302 return nid;
5303}
5304#endif
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
5316{
5317 unsigned long start_pfn, end_pfn;
5318 int i, this_nid;
5319
5320 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5321 start_pfn = min(start_pfn, max_low_pfn);
5322 end_pfn = min(end_pfn, max_low_pfn);
5323
5324 if (start_pfn < end_pfn)
5325 memblock_free_early_nid(PFN_PHYS(start_pfn),
5326 (end_pfn - start_pfn) << PAGE_SHIFT,
5327 this_nid);
5328 }
5329}
5330
5331
5332
5333
5334
5335
5336
5337
5338void __init sparse_memory_present_with_active_regions(int nid)
5339{
5340 unsigned long start_pfn, end_pfn;
5341 int i, this_nid;
5342
5343 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5344 memory_present(this_nid, start_pfn, end_pfn);
5345}
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358void __meminit get_pfn_range_for_nid(unsigned int nid,
5359 unsigned long *start_pfn, unsigned long *end_pfn)
5360{
5361 unsigned long this_start_pfn, this_end_pfn;
5362 int i;
5363
5364 *start_pfn = -1UL;
5365 *end_pfn = 0;
5366
5367 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5368 *start_pfn = min(*start_pfn, this_start_pfn);
5369 *end_pfn = max(*end_pfn, this_end_pfn);
5370 }
5371
5372 if (*start_pfn == -1UL)
5373 *start_pfn = 0;
5374}
5375
5376
5377
5378
5379
5380
5381static void __init find_usable_zone_for_movable(void)
5382{
5383 int zone_index;
5384 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5385 if (zone_index == ZONE_MOVABLE)
5386 continue;
5387
5388 if (arch_zone_highest_possible_pfn[zone_index] >
5389 arch_zone_lowest_possible_pfn[zone_index])
5390 break;
5391 }
5392
5393 VM_BUG_ON(zone_index == -1);
5394 movable_zone = zone_index;
5395}
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407static void __meminit adjust_zone_range_for_zone_movable(int nid,
5408 unsigned long zone_type,
5409 unsigned long node_start_pfn,
5410 unsigned long node_end_pfn,
5411 unsigned long *zone_start_pfn,
5412 unsigned long *zone_end_pfn)
5413{
5414
5415 if (zone_movable_pfn[nid]) {
5416
5417 if (zone_type == ZONE_MOVABLE) {
5418 *zone_start_pfn = zone_movable_pfn[nid];
5419 *zone_end_pfn = min(node_end_pfn,
5420 arch_zone_highest_possible_pfn[movable_zone]);
5421
5422
5423 } else if (!mirrored_kernelcore &&
5424 *zone_start_pfn < zone_movable_pfn[nid] &&
5425 *zone_end_pfn > zone_movable_pfn[nid]) {
5426 *zone_end_pfn = zone_movable_pfn[nid];
5427
5428
5429 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5430 *zone_start_pfn = *zone_end_pfn;
5431 }
5432}
5433
5434
5435
5436
5437
5438static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5439 unsigned long zone_type,
5440 unsigned long node_start_pfn,
5441 unsigned long node_end_pfn,
5442 unsigned long *zone_start_pfn,
5443 unsigned long *zone_end_pfn,
5444 unsigned long *ignored)
5445{
5446
5447 if (!node_start_pfn && !node_end_pfn)
5448 return 0;
5449
5450
5451 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5452 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5453 adjust_zone_range_for_zone_movable(nid, zone_type,
5454 node_start_pfn, node_end_pfn,
5455 zone_start_pfn, zone_end_pfn);
5456
5457
5458 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5459 return 0;
5460
5461
5462 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5463 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5464
5465
5466 return *zone_end_pfn - *zone_start_pfn;
5467}
5468
5469
5470
5471
5472
5473unsigned long __meminit __absent_pages_in_range(int nid,
5474 unsigned long range_start_pfn,
5475 unsigned long range_end_pfn)
5476{
5477 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5478 unsigned long start_pfn, end_pfn;
5479 int i;
5480
5481 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5482 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5483 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5484 nr_absent -= end_pfn - start_pfn;
5485 }
5486 return nr_absent;
5487}
5488
5489
5490
5491
5492
5493
5494
5495
5496unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5497 unsigned long end_pfn)
5498{
5499 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5500}
5501
5502
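/* Return the number of holes (absent pages) in a zone's range on a node. */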
5503static unsigned long __meminit zone_absent_pages_in_node(int nid,
5504 unsigned long zone_type,
5505 unsigned long node_start_pfn,
5506 unsigned long node_end_pfn,
5507 unsigned long *ignored)
5508{
5509 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5510 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5511 unsigned long zone_start_pfn, zone_end_pfn;
5512 unsigned long nr_absent;
5513
5514
5515 if (!node_start_pfn && !node_end_pfn)
5516 return 0;
5517
5518 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5519 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5520
5521 adjust_zone_range_for_zone_movable(nid, zone_type,
5522 node_start_pfn, node_end_pfn,
5523 &zone_start_pfn, &zone_end_pfn);
5524 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5525
5526
5527
5528
5529
5530
5531 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5532 unsigned long start_pfn, end_pfn;
5533 struct memblock_region *r;
5534
5535 for_each_memblock(memory, r) {
5536 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5537 zone_start_pfn, zone_end_pfn);
5538 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5539 zone_start_pfn, zone_end_pfn);
5540
5541 if (zone_type == ZONE_MOVABLE &&
5542 memblock_is_mirror(r))
5543 nr_absent += end_pfn - start_pfn;
5544
5545 if (zone_type == ZONE_NORMAL &&
5546 !memblock_is_mirror(r))
5547 nr_absent += end_pfn - start_pfn;
5548 }
5549 }
5550
5551 return nr_absent;
5552}
5553
5554#else
5555static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5556 unsigned long zone_type,
5557 unsigned long node_start_pfn,
5558 unsigned long node_end_pfn,
5559 unsigned long *zone_start_pfn,
5560 unsigned long *zone_end_pfn,
5561 unsigned long *zones_size)
5562{
5563 unsigned int zone;
5564
5565 *zone_start_pfn = node_start_pfn;
5566 for (zone = 0; zone < zone_type; zone++)
5567 *zone_start_pfn += zones_size[zone];
5568
5569 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5570
5571 return zones_size[zone_type];
5572}
5573
5574static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5575 unsigned long zone_type,
5576 unsigned long node_start_pfn,
5577 unsigned long node_end_pfn,
5578 unsigned long *zholes_size)
5579{
5580 if (!zholes_size)
5581 return 0;
5582
5583 return zholes_size[zone_type];
5584}
5585
5586#endif
5587
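/*
 * Compute, per zone and for the whole node, how many pages are spanned and
 * how many are actually present (spanned minus holes), and record the
 * results in the zone and pgdat structures.
 */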
5588static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5589 unsigned long node_start_pfn,
5590 unsigned long node_end_pfn,
5591 unsigned long *zones_size,
5592 unsigned long *zholes_size)
5593{
5594 unsigned long realtotalpages = 0, totalpages = 0;
5595 enum zone_type i;
5596
5597 for (i = 0; i < MAX_NR_ZONES; i++) {
5598 struct zone *zone = pgdat->node_zones + i;
5599 unsigned long zone_start_pfn, zone_end_pfn;
5600 unsigned long size, real_size;
5601
5602 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5603 node_start_pfn,
5604 node_end_pfn,
5605 &zone_start_pfn,
5606 &zone_end_pfn,
5607 zones_size);
5608 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5609 node_start_pfn, node_end_pfn,
5610 zholes_size);
5611 if (size)
5612 zone->zone_start_pfn = zone_start_pfn;
5613 else
5614 zone->zone_start_pfn = 0;
5615 zone->spanned_pages = size;
5616 zone->present_pages = real_size;
5617
5618 totalpages += size;
5619 realtotalpages += real_size;
5620 }
5621
5622 pgdat->node_spanned_pages = totalpages;
5623 pgdat->node_present_pages = realtotalpages;
5624 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5625 realtotalpages);
5626}
5627
5628#ifndef CONFIG_SPARSEMEM
5629
5630
5631
5632
5633
5634
5635
5636static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
5637{
5638 unsigned long usemapsize;
5639
5640 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
5641 usemapsize = roundup(zonesize, pageblock_nr_pages);
5642 usemapsize = usemapsize >> pageblock_order;
5643 usemapsize *= NR_PAGEBLOCK_BITS;
5644 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5645
5646 return usemapsize / 8;
5647}
5648
5649static void __init setup_usemap(struct pglist_data *pgdat,
5650 struct zone *zone,
5651 unsigned long zone_start_pfn,
5652 unsigned long zonesize)
5653{
5654 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
5655 zone->pageblock_flags = NULL;
5656 if (usemapsize)
5657 zone->pageblock_flags =
5658 memblock_virt_alloc_node_nopanic(usemapsize,
5659 pgdat->node_id);
5660}
5661#else
5662static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5663 unsigned long zone_start_pfn, unsigned long zonesize) {}
5664#endif
5665
5666#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
5667
5668
5669void __paginginit set_pageblock_order(void)
5670{
5671 unsigned int order;
5672
5673
5674 if (pageblock_order)
5675 return;
5676
5677 if (HPAGE_SHIFT > PAGE_SHIFT)
5678 order = HUGETLB_PAGE_ORDER;
5679 else
5680 order = MAX_ORDER - 1;
5681
5682
5683
5684
5685
5686
5687 pageblock_order = order;
5688}
5689#else
5690
5691
5692
5693
5694
5695
5696
5697void __paginginit set_pageblock_order(void)
5698{
5699}
5700
5701#endif
5702
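/*
 * Estimate how many pages the struct page array (memmap) for a zone will
 * consume.  With SPARSEMEM, memmap is only allocated for present sections,
 * so when the span is much larger than the number of present pages the
 * present count is used instead of the full span.
 */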
5703static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5704 unsigned long present_pages)
5705{
5706 unsigned long pages = spanned_pages;
5707
5708
5709
5710
5711
5712
5713
5714
5715
5716 if (spanned_pages > present_pages + (present_pages >> 4) &&
5717 IS_ENABLED(CONFIG_SPARSEMEM))
5718 pages = present_pages;
5719
5720 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5721}
5722
/*
 * Set up the zone data structures:
 *   - initialise the per-node locks, wait queues and LRU state
 *   - size each zone and charge it for its memmap and dma_reserve
 *   - initialise the zone's free lists and struct pages via memmap_init()
 *
 * NOTE: pgdat should get zeroed by the caller.
 */
5731static void __paginginit free_area_init_core(struct pglist_data *pgdat)
5732{
5733 enum zone_type j;
5734 int nid = pgdat->node_id;
5735 int ret;
5736
5737 pgdat_resize_init(pgdat);
5738#ifdef CONFIG_NUMA_BALANCING
5739 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5740 pgdat->numabalancing_migrate_nr_pages = 0;
5741 pgdat->numabalancing_migrate_next_window = jiffies;
5742#endif
5743#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5744 spin_lock_init(&pgdat->split_queue_lock);
5745 INIT_LIST_HEAD(&pgdat->split_queue);
5746 pgdat->split_queue_len = 0;
5747#endif
5748 init_waitqueue_head(&pgdat->kswapd_wait);
5749 init_waitqueue_head(&pgdat->pfmemalloc_wait);
5750#ifdef CONFIG_COMPACTION
5751 init_waitqueue_head(&pgdat->kcompactd_wait);
5752#endif
5753 pgdat_page_ext_init(pgdat);
5754 spin_lock_init(&pgdat->lru_lock);
5755 lruvec_init(node_lruvec(pgdat));
5756
5757 for (j = 0; j < MAX_NR_ZONES; j++) {
5758 struct zone *zone = pgdat->node_zones + j;
5759 unsigned long size, realsize, freesize, memmap_pages;
5760 unsigned long zone_start_pfn = zone->zone_start_pfn;
5761
5762 size = zone->spanned_pages;
5763 realsize = freesize = zone->present_pages;
5764
5765
5766
5767
5768
5769
5770 memmap_pages = calc_memmap_size(size, realsize);
5771 if (!is_highmem_idx(j)) {
5772 if (freesize >= memmap_pages) {
5773 freesize -= memmap_pages;
5774 if (memmap_pages)
5775 printk(KERN_DEBUG
5776 " %s zone: %lu pages used for memmap\n",
5777 zone_names[j], memmap_pages);
5778 } else
5779 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
5780 zone_names[j], memmap_pages, freesize);
5781 }
5782
5783
5784 if (j == 0 && freesize > dma_reserve) {
5785 freesize -= dma_reserve;
5786 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
5787 zone_names[0], dma_reserve);
5788 }
5789
5790 if (!is_highmem_idx(j))
5791 nr_kernel_pages += freesize;
5792
5793 else if (nr_kernel_pages > memmap_pages * 2)
5794 nr_kernel_pages -= memmap_pages;
5795 nr_all_pages += freesize;
5796
5797
5798
5799
5800
5801
5802 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
5803#ifdef CONFIG_NUMA
5804 zone->node = nid;
5805#endif
5806 zone->name = zone_names[j];
5807 zone->zone_pgdat = pgdat;
5808 spin_lock_init(&zone->lock);
5809 zone_seqlock_init(zone);
5810 zone_pcp_init(zone);
5811
5812 if (!size)
5813 continue;
5814
5815 set_pageblock_order();
5816 setup_usemap(pgdat, zone, zone_start_pfn, size);
5817 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
5818 BUG_ON(ret);
5819 memmap_init(size, nid, j, zone_start_pfn);
5820 }
5821}
5822
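/*
 * Allocate the flat mem_map array for a node (FLAT_NODE_MEM_MAP only).
 * The map is aligned down to a MAX_ORDER boundary so the buddy allocator's
 * assumptions about mem_map alignment hold.
 */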
5823static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
5824{
5825 unsigned long __maybe_unused start = 0;
5826 unsigned long __maybe_unused offset = 0;
5827
5828
5829 if (!pgdat->node_spanned_pages)
5830 return;
5831
5832#ifdef CONFIG_FLAT_NODE_MEM_MAP
5833 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5834 offset = pgdat->node_start_pfn - start;
5835
5836 if (!pgdat->node_mem_map) {
5837 unsigned long size, end;
5838 struct page *map;
5839
5840
5841
5842
5843
5844
5845 end = pgdat_end_pfn(pgdat);
5846 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5847 size = (end - start) * sizeof(struct page);
5848 map = alloc_remap(pgdat->node_id, size);
5849 if (!map)
5850 map = memblock_virt_alloc_node_nopanic(size,
5851 pgdat->node_id);
5852 pgdat->node_mem_map = map + offset;
5853 }
5854#ifndef CONFIG_NEED_MULTIPLE_NODES
5855
5856
5857
5858 if (pgdat == NODE_DATA(0)) {
5859 mem_map = NODE_DATA(0)->node_mem_map;
5860#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
5861 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
5862 mem_map -= offset;
5863#endif
5864 }
5865#endif
5866#endif
5867}
5868
5869void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5870 unsigned long node_start_pfn, unsigned long *zholes_size)
5871{
5872 pg_data_t *pgdat = NODE_DATA(nid);
5873 unsigned long start_pfn = 0;
5874 unsigned long end_pfn = 0;
5875
5876
5877 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
5878
5879 reset_deferred_meminit(pgdat);
5880 pgdat->node_id = nid;
5881 pgdat->node_start_pfn = node_start_pfn;
5882 pgdat->per_cpu_nodestats = NULL;
5883#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5884 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5885 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
5886 (u64)start_pfn << PAGE_SHIFT,
5887 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
5888#else
5889 start_pfn = node_start_pfn;
5890#endif
5891 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5892 zones_size, zholes_size);
5893
5894 alloc_node_mem_map(pgdat);
5895#ifdef CONFIG_FLAT_NODE_MEM_MAP
5896 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5897 nid, (unsigned long)pgdat,
5898 (unsigned long)pgdat->node_mem_map);
5899#endif
5900
5901 free_area_init_core(pgdat);
5902}
5903
5904#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5905
5906#if MAX_NUMNODES > 1
5907
5908
5909
5910void __init setup_nr_node_ids(void)
5911{
5912 unsigned int highest;
5913
5914 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
5915 nr_node_ids = highest + 1;
5916}
5917#endif
5918
/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after the node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
 * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether the pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Returns the determined alignment in pfns.  0 if there is no alignment
 * requirement (a single node).
 */
5938unsigned long __init node_map_pfn_alignment(void)
5939{
5940 unsigned long accl_mask = 0, last_end = 0;
5941 unsigned long start, end, mask;
5942 int last_nid = -1;
5943 int i, nid;
5944
5945 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
5946 if (!start || last_nid < 0 || last_nid == nid) {
5947 last_nid = nid;
5948 last_end = end;
5949 continue;
5950 }
5951
5952
5953
5954
5955
5956
5957 mask = ~((1 << __ffs(start)) - 1);
5958 while (mask && last_end <= (start & (mask << 1)))
5959 mask <<= 1;
5960
5961
5962 accl_mask |= mask;
5963 }
5964
5965
5966 return ~accl_mask + 1;
5967}
5968
5969
5970static unsigned long __init find_min_pfn_for_node(int nid)
5971{
5972 unsigned long min_pfn = ULONG_MAX;
5973 unsigned long start_pfn;
5974 int i;
5975
5976 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5977 min_pfn = min(min_pfn, start_pfn);
5978
5979 if (min_pfn == ULONG_MAX) {
5980 pr_warn("Could not find start_pfn for node %d\n", nid);
5981 return 0;
5982 }
5983
5984 return min_pfn;
5985}
5986
5987
5988
5989
5990
5991
5992
5993unsigned long __init find_min_pfn_with_active_regions(void)
5994{
5995 return find_min_pfn_for_node(MAX_NUMNODES);
5996}
5997
5998
5999
6000
6001
6002
6003static unsigned long __init early_calculate_totalpages(void)
6004{
6005 unsigned long totalpages = 0;
6006 unsigned long start_pfn, end_pfn;
6007 int i, nid;
6008
6009 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6010 unsigned long pages = end_pfn - start_pfn;
6011
6012 totalpages += pages;
6013 if (pages)
6014 node_set_state(nid, N_MEMORY);
6015 }
6016 return totalpages;
6017}
6018
6019
6020
6021
6022
6023
6024
6025static void __init find_zone_movable_pfns_for_nodes(void)
6026{
6027 int i, nid;
6028 unsigned long usable_startpfn;
6029 unsigned long kernelcore_node, kernelcore_remaining;
6030
6031 nodemask_t saved_node_state = node_states[N_MEMORY];
6032 unsigned long totalpages = early_calculate_totalpages();
6033 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
6034 struct memblock_region *r;
6035
6036
6037 find_usable_zone_for_movable();
6038
6039
6040
6041
6042
6043 if (movable_node_is_enabled()) {
6044 for_each_memblock(memory, r) {
6045 if (!memblock_is_hotpluggable(r))
6046 continue;
6047
6048 nid = r->nid;
6049
6050 usable_startpfn = PFN_DOWN(r->base);
6051 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6052 min(usable_startpfn, zone_movable_pfn[nid]) :
6053 usable_startpfn;
6054 }
6055
6056 goto out2;
6057 }
6058
6059
6060
6061
6062 if (mirrored_kernelcore) {
6063 bool mem_below_4gb_not_mirrored = false;
6064
6065 for_each_memblock(memory, r) {
6066 if (memblock_is_mirror(r))
6067 continue;
6068
6069 nid = r->nid;
6070
6071 usable_startpfn = memblock_region_memory_base_pfn(r);
6072
6073 if (usable_startpfn < 0x100000) {
6074 mem_below_4gb_not_mirrored = true;
6075 continue;
6076 }
6077
6078 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6079 min(usable_startpfn, zone_movable_pfn[nid]) :
6080 usable_startpfn;
6081 }
6082
6083 if (mem_below_4gb_not_mirrored)
6084 pr_warn("This configuration results in unmirrored kernel memory.");
6085
6086 goto out2;
6087 }
6088
6089
6090
6091
6092
6093
6094
6095
6096
6097 if (required_movablecore) {
6098 unsigned long corepages;
6099
6100
6101
6102
6103
6104 required_movablecore =
6105 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
6106 required_movablecore = min(totalpages, required_movablecore);
6107 corepages = totalpages - required_movablecore;
6108
6109 required_kernelcore = max(required_kernelcore, corepages);
6110 }
6111
6112
6113
6114
6115
6116 if (!required_kernelcore || required_kernelcore >= totalpages)
6117 goto out;
6118
6119
6120 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6121
6122restart:
6123
6124 kernelcore_node = required_kernelcore / usable_nodes;
6125 for_each_node_state(nid, N_MEMORY) {
6126 unsigned long start_pfn, end_pfn;
6127
6128
6129
6130
6131
6132
6133 if (required_kernelcore < kernelcore_node)
6134 kernelcore_node = required_kernelcore / usable_nodes;
6135
6136
6137
6138
6139
6140
6141 kernelcore_remaining = kernelcore_node;
6142
6143
6144 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6145 unsigned long size_pages;
6146
6147 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
6148 if (start_pfn >= end_pfn)
6149 continue;
6150
6151
6152 if (start_pfn < usable_startpfn) {
6153 unsigned long kernel_pages;
6154 kernel_pages = min(end_pfn, usable_startpfn)
6155 - start_pfn;
6156
6157 kernelcore_remaining -= min(kernel_pages,
6158 kernelcore_remaining);
6159 required_kernelcore -= min(kernel_pages,
6160 required_kernelcore);
6161
6162
6163 if (end_pfn <= usable_startpfn) {
6164
6165
6166
6167
6168
6169
6170
6171 zone_movable_pfn[nid] = end_pfn;
6172 continue;
6173 }
6174 start_pfn = usable_startpfn;
6175 }
6176
6177
6178
6179
6180
6181
6182 size_pages = end_pfn - start_pfn;
6183 if (size_pages > kernelcore_remaining)
6184 size_pages = kernelcore_remaining;
6185 zone_movable_pfn[nid] = start_pfn + size_pages;
6186
6187
6188
6189
6190
6191
6192 required_kernelcore -= min(required_kernelcore,
6193 size_pages);
6194 kernelcore_remaining -= size_pages;
6195 if (!kernelcore_remaining)
6196 break;
6197 }
6198 }
6199
6200
6201
6202
6203
6204
6205
6206 usable_nodes--;
6207 if (usable_nodes && required_kernelcore > usable_nodes)
6208 goto restart;
6209
6210out2:
6211
6212 for (nid = 0; nid < MAX_NUMNODES; nid++)
6213 zone_movable_pfn[nid] =
6214 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
6215
6216out:
6217
6218 node_states[N_MEMORY] = saved_node_state;
6219}
6220
6221
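/*
 * If the node has any populated zone below ZONE_MOVABLE, mark it
 * N_HIGH_MEMORY, and additionally N_NORMAL_MEMORY when that zone is
 * ZONE_NORMAL or lower.
 */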
6222static void check_for_memory(pg_data_t *pgdat, int nid)
6223{
6224 enum zone_type zone_type;
6225
6226 if (N_MEMORY == N_NORMAL_MEMORY)
6227 return;
6228
6229 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
6230 struct zone *zone = &pgdat->node_zones[zone_type];
6231 if (populated_zone(zone)) {
6232 node_set_state(nid, N_HIGH_MEMORY);
6233 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6234 zone_type <= ZONE_NORMAL)
6235 node_set_state(nid, N_NORMAL_MEMORY);
6236 break;
6237 }
6238 }
6239}
6240
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated.  If the maximum PFN
 * between two adjacent zones matches, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages.  It is also assumed that a zone
 * starts where the previous one ended; for example, ZONE_DMA32 starts at
 * arch_max_dma_pfn.
 */
6254void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6255{
6256 unsigned long start_pfn, end_pfn;
6257 int i, nid;
6258
6259
6260 memset(arch_zone_lowest_possible_pfn, 0,
6261 sizeof(arch_zone_lowest_possible_pfn));
6262 memset(arch_zone_highest_possible_pfn, 0,
6263 sizeof(arch_zone_highest_possible_pfn));
6264
6265 start_pfn = find_min_pfn_with_active_regions();
6266
6267 for (i = 0; i < MAX_NR_ZONES; i++) {
6268 if (i == ZONE_MOVABLE)
6269 continue;
6270
6271 end_pfn = max(max_zone_pfn[i], start_pfn);
6272 arch_zone_lowest_possible_pfn[i] = start_pfn;
6273 arch_zone_highest_possible_pfn[i] = end_pfn;
6274
6275 start_pfn = end_pfn;
6276 }
6277 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
6278 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
6279
6280
6281 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
6282 find_zone_movable_pfns_for_nodes();
6283
6284
6285 pr_info("Zone ranges:\n");
6286 for (i = 0; i < MAX_NR_ZONES; i++) {
6287 if (i == ZONE_MOVABLE)
6288 continue;
6289 pr_info(" %-8s ", zone_names[i]);
6290 if (arch_zone_lowest_possible_pfn[i] ==
6291 arch_zone_highest_possible_pfn[i])
6292 pr_cont("empty\n");
6293 else
6294 pr_cont("[mem %#018Lx-%#018Lx]\n",
6295 (u64)arch_zone_lowest_possible_pfn[i]
6296 << PAGE_SHIFT,
6297 ((u64)arch_zone_highest_possible_pfn[i]
6298 << PAGE_SHIFT) - 1);
6299 }
6300
6301
6302 pr_info("Movable zone start for each node\n");
6303 for (i = 0; i < MAX_NUMNODES; i++) {
6304 if (zone_movable_pfn[i])
6305 pr_info(" Node %d: %#018Lx\n", i,
6306 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
6307 }
6308
6309
6310 pr_info("Early memory node ranges\n");
6311 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
6312 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6313 (u64)start_pfn << PAGE_SHIFT,
6314 ((u64)end_pfn << PAGE_SHIFT) - 1);
6315
6316
6317 mminit_verify_pageflags_layout();
6318 setup_nr_node_ids();
6319 for_each_online_node(nid) {
6320 pg_data_t *pgdat = NODE_DATA(nid);
6321 free_area_init_node(nid, NULL,
6322 find_min_pfn_for_node(nid), NULL);
6323
6324
6325 if (pgdat->node_present_pages)
6326 node_set_state(nid, N_MEMORY);
6327 check_for_memory(pgdat, nid);
6328 }
6329}
6330
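/*
 * Parse a kernelcore=/movablecore= size argument and convert it to a page
 * count.  Values that do not fit in an unsigned long only trigger a warning.
 */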
6331static int __init cmdline_parse_core(char *p, unsigned long *core)
6332{
6333 unsigned long long coremem;
6334 if (!p)
6335 return -EINVAL;
6336
6337 coremem = memparse(p, &p);
6338 *core = coremem >> PAGE_SHIFT;
6339
6340
6341 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6342
6343 return 0;
6344}
6345
6346
6347
6348
6349
6350static int __init cmdline_parse_kernelcore(char *p)
6351{
6352
6353 if (parse_option_str(p, "mirror")) {
6354 mirrored_kernelcore = true;
6355 return 0;
6356 }
6357
6358 return cmdline_parse_core(p, &required_kernelcore);
6359}
6360
6361
6362
6363
6364
6365static int __init cmdline_parse_movablecore(char *p)
6366{
6367 return cmdline_parse_core(p, &required_movablecore);
6368}
6369
6370early_param("kernelcore", cmdline_parse_kernelcore);
6371early_param("movablecore", cmdline_parse_movablecore);
6372
6373#endif
6374
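/*
 * Adjust the zone's managed page count (and the global totals) when pages
 * are handed to or reclaimed from the buddy allocator at runtime, e.g. by
 * memory ballooning drivers.
 */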
6375void adjust_managed_page_count(struct page *page, long count)
6376{
6377 spin_lock(&managed_page_count_lock);
6378 page_zone(page)->managed_pages += count;
6379 totalram_pages += count;
6380#ifdef CONFIG_HIGHMEM
6381 if (PageHighMem(page))
6382 totalhigh_pages += count;
6383#endif
6384 spin_unlock(&managed_page_count_lock);
6385}
6386EXPORT_SYMBOL(adjust_managed_page_count);
6387
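/*
 * Free a reserved range of kernel memory [start, end) back to the buddy
 * allocator, optionally poisoning it first, and print how much was freed.
 * Returns the number of pages released.
 */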
6388unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
6389{
6390 void *pos;
6391 unsigned long pages = 0;
6392
6393 start = (void *)PAGE_ALIGN((unsigned long)start);
6394 end = (void *)((unsigned long)end & PAGE_MASK);
6395 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6396 if ((unsigned int)poison <= 0xFF)
6397 memset(pos, poison, PAGE_SIZE);
6398 free_reserved_page(virt_to_page(pos));
6399 }
6400
6401 if (pages && s)
6402 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
6403 s, pages << (PAGE_SHIFT - 10), start, end);
6404
6405 return pages;
6406}
6407EXPORT_SYMBOL(free_reserved_area);
6408
6409#ifdef CONFIG_HIGHMEM
6410void free_highmem_page(struct page *page)
6411{
6412 __free_reserved_page(page);
6413 totalram_pages++;
6414 page_zone(page)->managed_pages++;
6415 totalhigh_pages++;
6416}
6417#endif
6418
6419
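/*
 * Print the "Memory: ..." boot line: free/total memory plus a breakdown of
 * kernel text, data, rodata, init and bss sizes derived from the
 * linker-provided section symbols.
 */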
6420void __init mem_init_print_info(const char *str)
6421{
6422 unsigned long physpages, codesize, datasize, rosize, bss_size;
6423 unsigned long init_code_size, init_data_size;
6424
6425 physpages = get_num_physpages();
6426 codesize = _etext - _stext;
6427 datasize = _edata - _sdata;
6428 rosize = __end_rodata - __start_rodata;
6429 bss_size = __bss_stop - __bss_start;
6430 init_data_size = __init_end - __init_begin;
6431 init_code_size = _einittext - _sinittext;
6432
6433
6434
6435
6436
6437
6438
6439
6440#define adj_init_size(start, end, size, pos, adj) \
6441 do { \
6442 if (start <= pos && pos < end && size > adj) \
6443 size -= adj; \
6444 } while (0)
6445
6446 adj_init_size(__init_begin, __init_end, init_data_size,
6447 _sinittext, init_code_size);
6448 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6449 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6450 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6451 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6452
6453#undef adj_init_size
6454
6455 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
6456#ifdef CONFIG_HIGHMEM
6457 ", %luK highmem"
6458#endif
6459 "%s%s)\n",
6460 nr_free_pages() << (PAGE_SHIFT - 10),
6461 physpages << (PAGE_SHIFT - 10),
6462 codesize >> 10, datasize >> 10, rosize >> 10,
6463 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6464 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6465 totalcma_pages << (PAGE_SHIFT - 10),
6466#ifdef CONFIG_HIGHMEM
6467 totalhigh_pages << (PAGE_SHIFT - 10),
6468#endif
6469 str ? ", " : "", str ? str : "");
6470}
6471
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: the number of pages to mark reserved
 *
 * Per-cpu batch sizes and zone watermarks are derived from managed_pages.
 * In the first zone (e.g. ZONE_DMA) a significant percentage of pages may be
 * consumed by the kernel image and other unfreeable allocations, which can
 * skew those calculations badly.  This function lets the architecture
 * account for such unfreeable pages; the effect is lower watermarks and a
 * smaller per-cpu batch size for that zone.
 */
6483void __init set_dma_reserve(unsigned long new_dma_reserve)
6484{
6485 dma_reserve = new_dma_reserve;
6486}
6487
6488void __init free_area_init(unsigned long *zones_size)
6489{
6490 free_area_init_node(0, zones_size,
6491 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6492}
6493
6494static int page_alloc_cpu_notify(struct notifier_block *self,
6495 unsigned long action, void *hcpu)
6496{
6497 int cpu = (unsigned long)hcpu;
6498
6499 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
6500 lru_add_drain_cpu(cpu);
6501 drain_pages(cpu);
6502
6503
6504
6505
6506
6507
6508
6509 vm_events_fold_cpu(cpu);
6510
6511
6512
6513
6514
6515
6516
6517
6518 cpu_vm_stats_fold(cpu);
6519 }
6520 return NOTIFY_OK;
6521}
6522
6523void __init page_alloc_init(void)
6524{
6525 hotcpu_notifier(page_alloc_cpu_notify, 0);
6526}
6527
6528
6529
6530
6531
6532static void calculate_totalreserve_pages(void)
6533{
6534 struct pglist_data *pgdat;
6535 unsigned long reserve_pages = 0;
6536 enum zone_type i, j;
6537
6538 for_each_online_pgdat(pgdat) {
6539
6540 pgdat->totalreserve_pages = 0;
6541
6542 for (i = 0; i < MAX_NR_ZONES; i++) {
6543 struct zone *zone = pgdat->node_zones + i;
6544 long max = 0;
6545
6546
6547 for (j = i; j < MAX_NR_ZONES; j++) {
6548 if (zone->lowmem_reserve[j] > max)
6549 max = zone->lowmem_reserve[j];
6550 }
6551
6552
6553 max += high_wmark_pages(zone);
6554
6555 if (max > zone->managed_pages)
6556 max = zone->managed_pages;
6557
6558 pgdat->totalreserve_pages += max;
6559
6560 reserve_pages += max;
6561 }
6562 }
6563 totalreserve_pages = reserve_pages;
6564}
6565
6566
6567
6568
6569
6570
6571
6572static void setup_per_zone_lowmem_reserve(void)
6573{
6574 struct pglist_data *pgdat;
6575 enum zone_type j, idx;
6576
6577 for_each_online_pgdat(pgdat) {
6578 for (j = 0; j < MAX_NR_ZONES; j++) {
6579 struct zone *zone = pgdat->node_zones + j;
6580 unsigned long managed_pages = zone->managed_pages;
6581
6582 zone->lowmem_reserve[j] = 0;
6583
6584 idx = j;
6585 while (idx) {
6586 struct zone *lower_zone;
6587
6588 idx--;
6589
6590 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6591 sysctl_lowmem_reserve_ratio[idx] = 1;
6592
6593 lower_zone = pgdat->node_zones + idx;
6594 lower_zone->lowmem_reserve[j] = managed_pages /
6595 sysctl_lowmem_reserve_ratio[idx];
6596 managed_pages += lower_zone->managed_pages;
6597 }
6598 }
6599 }
6600
6601
6602 calculate_totalreserve_pages();
6603}
6604
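/*
 * Recompute the min/low/high watermarks of every zone from min_free_kbytes
 * and watermark_scale_factor.  Roughly (a sketch of the arithmetic below,
 * not exact for highmem zones):
 *
 *	min  = min_free_kbytes-in-pages * zone->managed_pages / lowmem_pages
 *	gap  = max(min / 4, managed_pages * watermark_scale_factor / 10000)
 *	low  = min + gap
 *	high = min + 2 * gap
 *
 * Highmem zones instead get a small fixed min (clamped between
 * SWAP_CLUSTER_MAX and 128 pages), since they hold no kernel allocations.
 */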
6605static void __setup_per_zone_wmarks(void)
6606{
6607 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6608 unsigned long lowmem_pages = 0;
6609 struct zone *zone;
6610 unsigned long flags;
6611
6612
6613 for_each_zone(zone) {
6614 if (!is_highmem(zone))
6615 lowmem_pages += zone->managed_pages;
6616 }
6617
6618 for_each_zone(zone) {
6619 u64 tmp;
6620
6621 spin_lock_irqsave(&zone->lock, flags);
6622 tmp = (u64)pages_min * zone->managed_pages;
6623 do_div(tmp, lowmem_pages);
6624 if (is_highmem(zone)) {
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634 unsigned long min_pages;
6635
6636 min_pages = zone->managed_pages / 1024;
6637 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6638 zone->watermark[WMARK_MIN] = min_pages;
6639 } else {
6640
6641
6642
6643
6644 zone->watermark[WMARK_MIN] = tmp;
6645 }
6646
6647
6648
6649
6650
6651
6652 tmp = max_t(u64, tmp >> 2,
6653 mult_frac(zone->managed_pages,
6654 watermark_scale_factor, 10000));
6655
6656 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6657 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
6658
6659 spin_unlock_irqrestore(&zone->lock, flags);
6660 }
6661
6662
6663 calculate_totalreserve_pages();
6664}
6665
6666
6667
6668
6669
6670
6671
6672
6673void setup_per_zone_wmarks(void)
6674{
6675 mutex_lock(&zonelists_mutex);
6676 __setup_per_zone_wmarks();
6677 mutex_unlock(&zonelists_mutex);
6678}
6679
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines we
 * want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), computed as
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields, for example:
 *
 *	16MB lowmem:	512k
 *	64MB lowmem:	1024k
 *	256MB lowmem:	2048k
 *	1GB lowmem:	4096k
 *	4GB lowmem:	8192k
 *	16GB lowmem:	16384k
 */
6704int __meminit init_per_zone_wmark_min(void)
6705{
6706 unsigned long lowmem_kbytes;
6707 int new_min_free_kbytes;
6708
6709 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6710 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6711
6712 if (new_min_free_kbytes > user_min_free_kbytes) {
6713 min_free_kbytes = new_min_free_kbytes;
6714 if (min_free_kbytes < 128)
6715 min_free_kbytes = 128;
6716 if (min_free_kbytes > 65536)
6717 min_free_kbytes = 65536;
6718 } else {
6719 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6720 new_min_free_kbytes, user_min_free_kbytes);
6721 }
6722 setup_per_zone_wmarks();
6723 refresh_zone_stat_thresholds();
6724 setup_per_zone_lowmem_reserve();
6725
6726#ifdef CONFIG_NUMA
6727 setup_min_unmapped_ratio();
6728 setup_min_slab_ratio();
6729#endif
6730
6731 return 0;
6732}
6733core_initcall(init_per_zone_wmark_min)
6734
6735
6736
6737
6738
6739
6740int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
6741 void __user *buffer, size_t *length, loff_t *ppos)
6742{
6743 int rc;
6744
6745 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6746 if (rc)
6747 return rc;
6748
6749 if (write) {
6750 user_min_free_kbytes = min_free_kbytes;
6751 setup_per_zone_wmarks();
6752 }
6753 return 0;
6754}
6755
6756int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
6757 void __user *buffer, size_t *length, loff_t *ppos)
6758{
6759 int rc;
6760
6761 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6762 if (rc)
6763 return rc;
6764
6765 if (write)
6766 setup_per_zone_wmarks();
6767
6768 return 0;
6769}
6770
6771#ifdef CONFIG_NUMA
6772static void setup_min_unmapped_ratio(void)
6773{
6774 pg_data_t *pgdat;
6775 struct zone *zone;
6776
6777 for_each_online_pgdat(pgdat)
6778 pgdat->min_unmapped_pages = 0;
6779
6780 for_each_zone(zone)
6781 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
6782 sysctl_min_unmapped_ratio) / 100;
6783}
6784
6785
6786int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
6787 void __user *buffer, size_t *length, loff_t *ppos)
6788{
6789 int rc;
6790
6791 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6792 if (rc)
6793 return rc;
6794
6795 setup_min_unmapped_ratio();
6796
6797 return 0;
6798}
6799
6800static void setup_min_slab_ratio(void)
6801{
6802 pg_data_t *pgdat;
6803 struct zone *zone;
6804
6805 for_each_online_pgdat(pgdat)
6806 pgdat->min_slab_pages = 0;
6807
6808 for_each_zone(zone)
6809 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
6810 sysctl_min_slab_ratio) / 100;
6811}
6812
6813int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
6814 void __user *buffer, size_t *length, loff_t *ppos)
6815{
6816 int rc;
6817
6818 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6819 if (rc)
6820 return rc;
6821
6822 setup_min_slab_ratio();
6823
6824 return 0;
6825}
6826#endif
6827
6828
6829
6830
6831
6832
6833
6834
6835
6836
6837int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
6838 void __user *buffer, size_t *length, loff_t *ppos)
6839{
6840 proc_dointvec_minmax(table, write, buffer, length, ppos);
6841 setup_per_zone_lowmem_reserve();
6842 return 0;
6843}
6844
6845
6846
6847
6848
6849
6850int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
6851 void __user *buffer, size_t *length, loff_t *ppos)
6852{
6853 struct zone *zone;
6854 int old_percpu_pagelist_fraction;
6855 int ret;
6856
6857 mutex_lock(&pcp_batch_high_lock);
6858 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6859
6860 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6861 if (!write || ret < 0)
6862 goto out;
6863
6864
6865 if (percpu_pagelist_fraction &&
6866 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6867 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6868 ret = -EINVAL;
6869 goto out;
6870 }
6871
6872
6873 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6874 goto out;
6875
6876 for_each_populated_zone(zone) {
6877 unsigned int cpu;
6878
6879 for_each_possible_cpu(cpu)
6880 pageset_set_high_and_batch(zone,
6881 per_cpu_ptr(zone->pageset, cpu));
6882 }
6883out:
6884 mutex_unlock(&pcp_batch_high_lock);
6885 return ret;
6886}
6887
6888#ifdef CONFIG_NUMA
6889int hashdist = HASHDIST_DEFAULT;
6890
6891static int __init set_hashdist(char *str)
6892{
6893 if (!str)
6894 return 0;
6895 hashdist = simple_strtoul(str, &str, 0);
6896 return 1;
6897}
6898__setup("hashdist=", set_hashdist);
6899#endif
6900
6901#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
6902
6903
6904
6905
6906static unsigned long __init arch_reserved_kernel_pages(void)
6907{
6908 return 0;
6909}
6910#endif
6911
6912
6913
6914
6915
6916
6917
6918void *__init alloc_large_system_hash(const char *tablename,
6919 unsigned long bucketsize,
6920 unsigned long numentries,
6921 int scale,
6922 int flags,
6923 unsigned int *_hash_shift,
6924 unsigned int *_hash_mask,
6925 unsigned long low_limit,
6926 unsigned long high_limit)
6927{
6928 unsigned long long max = high_limit;
6929 unsigned long log2qty, size;
6930 void *table = NULL;
6931
6932
6933 if (!numentries) {
6934
6935 numentries = nr_kernel_pages;
6936 numentries -= arch_reserved_kernel_pages();
6937
6938
6939 if (PAGE_SHIFT < 20)
6940 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6941
6942
6943 if (scale > PAGE_SHIFT)
6944 numentries >>= (scale - PAGE_SHIFT);
6945 else
6946 numentries <<= (PAGE_SHIFT - scale);
6947
6948
6949 if (unlikely(flags & HASH_SMALL)) {
6950
6951 WARN_ON(!(flags & HASH_EARLY));
6952 if (!(numentries >> *_hash_shift)) {
6953 numentries = 1UL << *_hash_shift;
6954 BUG_ON(!numentries);
6955 }
6956 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
6957 numentries = PAGE_SIZE / bucketsize;
6958 }
6959 numentries = roundup_pow_of_two(numentries);
6960
6961
6962 if (max == 0) {
6963 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6964 do_div(max, bucketsize);
6965 }
6966 max = min(max, 0x80000000ULL);
6967
6968 if (numentries < low_limit)
6969 numentries = low_limit;
6970 if (numentries > max)
6971 numentries = max;
6972
6973 log2qty = ilog2(numentries);
6974
6975 do {
6976 size = bucketsize << log2qty;
6977 if (flags & HASH_EARLY)
6978 table = memblock_virt_alloc_nopanic(size, 0);
6979 else if (hashdist)
6980 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6981 else {
6982
6983
6984
6985
6986
6987 if (get_order(size) < MAX_ORDER) {
6988 table = alloc_pages_exact(size, GFP_ATOMIC);
6989 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6990 }
6991 }
6992 } while (!table && size > PAGE_SIZE && --log2qty);
6993
6994 if (!table)
6995 panic("Failed to allocate %s hash table\n", tablename);
6996
6997 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
6998 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
6999
7000 if (_hash_shift)
7001 *_hash_shift = log2qty;
7002 if (_hash_mask)
7003 *_hash_mask = (1 << log2qty) - 1;
7004
7005 return table;
7006}
7007
/*
 * This function checks whether the pageblock that contains @page includes
 * unmovable pages or not.  If @count is not zero, up to @count pages that
 * look unmovable are tolerated before the block is reported as unmovable.
 *
 * The PageLRU check is done without isolation or the lru_lock, so it can
 * race: a MIGRATE_MOVABLE block might still include unmovable pages, and
 * callers must not expect this function to be exact.
 */
7016bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7017 bool skip_hwpoisoned_pages)
7018{
7019 unsigned long pfn, iter, found;
7020 int mt;
7021
7022
7023
7024
7025
7026 if (zone_idx(zone) == ZONE_MOVABLE)
7027 return false;
7028 mt = get_pageblock_migratetype(page);
7029 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
7030 return false;
7031
7032 pfn = page_to_pfn(page);
7033 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7034 unsigned long check = pfn + iter;
7035
7036 if (!pfn_valid_within(check))
7037 continue;
7038
7039 page = pfn_to_page(check);
7040
7041
7042
7043
7044
7045
7046 if (PageHuge(page)) {
7047 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7048 continue;
7049 }
7050
7051
7052
7053
7054
7055
7056
7057 if (!page_ref_count(page)) {
7058 if (PageBuddy(page))
7059 iter += (1 << page_order(page)) - 1;
7060 continue;
7061 }
7062
7063
7064
7065
7066
7067 if (skip_hwpoisoned_pages && PageHWPoison(page))
7068 continue;
7069
7070 if (!PageLRU(page))
7071 found++;
7072
7073
7074
7075
7076
7077
7078
7079
7080
7081
7082
7083
7084
7085 if (found > count)
7086 return true;
7087 }
7088 return false;
7089}
7090
7091bool is_pageblock_removable_nolock(struct page *page)
7092{
7093 struct zone *zone;
7094 unsigned long pfn;
7095
7096
7097
7098
7099
7100
7101
7102
7103 if (!node_online(page_to_nid(page)))
7104 return false;
7105
7106 zone = page_zone(page);
7107 pfn = page_to_pfn(page);
7108 if (!zone_spans_pfn(zone, pfn))
7109 return false;
7110
7111 return !has_unmovable_pages(zone, page, 0, true);
7112}
7113
7114#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
7115
7116static unsigned long pfn_max_align_down(unsigned long pfn)
7117{
7118 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7119 pageblock_nr_pages) - 1);
7120}
7121
7122static unsigned long pfn_max_align_up(unsigned long pfn)
7123{
7124 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7125 pageblock_nr_pages));
7126}
7127
7128
7129static int __alloc_contig_migrate_range(struct compact_control *cc,
7130 unsigned long start, unsigned long end)
7131{
7132
7133 unsigned long nr_reclaimed;
7134 unsigned long pfn = start;
7135 unsigned int tries = 0;
7136 int ret = 0;
7137
7138 migrate_prep();
7139
7140 while (pfn < end || !list_empty(&cc->migratepages)) {
7141 if (fatal_signal_pending(current)) {
7142 ret = -EINTR;
7143 break;
7144 }
7145
7146 if (list_empty(&cc->migratepages)) {
7147 cc->nr_migratepages = 0;
7148 pfn = isolate_migratepages_range(cc, pfn, end);
7149 if (!pfn) {
7150 ret = -EINTR;
7151 break;
7152 }
7153 tries = 0;
7154 } else if (++tries == 5) {
7155 ret = ret < 0 ? ret : -EBUSY;
7156 break;
7157 }
7158
7159 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7160 &cc->migratepages);
7161 cc->nr_migratepages -= nr_reclaimed;
7162
7163 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
7164 NULL, 0, cc->mode, MR_CMA);
7165 }
7166 if (ret < 0) {
7167 putback_movable_pages(&cc->migratepages);
7168 return ret;
7169 }
7170 return 0;
7171}
7172
/**
 * alloc_contig_range() -- tries to allocate the given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that we
 * are the only thread that changes the migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code.  On success all pages
 * whose PFN is in [start, end) are allocated for the caller and need to be
 * freed with free_contig_range().
 */
7193int alloc_contig_range(unsigned long start, unsigned long end,
7194 unsigned migratetype)
7195{
7196 unsigned long outer_start, outer_end;
7197 unsigned int order;
7198 int ret = 0;
7199
7200 struct compact_control cc = {
7201 .nr_migratepages = 0,
7202 .order = -1,
7203 .zone = page_zone(pfn_to_page(start)),
7204 .mode = MIGRATE_SYNC,
7205 .ignore_skip_hint = true,
7206 };
7207 INIT_LIST_HEAD(&cc.migratepages);
7208
7209
7210
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228
7229
7230
7231
7232
7233 ret = start_isolate_page_range(pfn_max_align_down(start),
7234 pfn_max_align_up(end), migratetype,
7235 false);
7236 if (ret)
7237 return ret;
7238
7239
7240
7241
7242
7243 ret = __alloc_contig_migrate_range(&cc, start, end);
7244 if (ret && ret != -EBUSY)
7245 goto done;
7246
7247
7248
7249
7250
7251
7252
7253
7254
7255
7256
7257
7258
7259
7260
7261
7262
7263
7264 lru_add_drain_all();
7265 drain_all_pages(cc.zone);
7266
7267 order = 0;
7268 outer_start = start;
7269 while (!PageBuddy(pfn_to_page(outer_start))) {
7270 if (++order >= MAX_ORDER) {
7271 outer_start = start;
7272 break;
7273 }
7274 outer_start &= ~0UL << order;
7275 }
7276
7277 if (outer_start != start) {
7278 order = page_order(pfn_to_page(outer_start));
7279
7280
7281
7282
7283
7284
7285
7286 if (outer_start + (1UL << order) <= start)
7287 outer_start = start;
7288 }
7289
7290
7291 if (test_pages_isolated(outer_start, end, false)) {
7292 pr_info("%s: [%lx, %lx) PFNs busy\n",
7293 __func__, outer_start, end);
7294 ret = -EBUSY;
7295 goto done;
7296 }
7297
7298
7299 outer_end = isolate_freepages_range(&cc, outer_start, end);
7300 if (!outer_end) {
7301 ret = -EBUSY;
7302 goto done;
7303 }
7304
7305
7306 if (start != outer_start)
7307 free_contig_range(outer_start, start - outer_start);
7308 if (end != outer_end)
7309 free_contig_range(end, outer_end - end);
7310
7311done:
7312 undo_isolate_page_range(pfn_max_align_down(start),
7313 pfn_max_align_up(end), migratetype);
7314 return ret;
7315}
7316
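/*
 * free_contig_range() returns pages obtained from alloc_contig_range() to
 * the buddy allocator.  A minimal usage sketch (hypothetical caller, not
 * code from this file):
 *
 *	if (!alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA)) {
 *		... use pfn_to_page(pfn) .. pfn_to_page(pfn + nr_pages - 1) ...
 *		free_contig_range(pfn, nr_pages);
 *	}
 */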
7317void free_contig_range(unsigned long pfn, unsigned nr_pages)
7318{
7319 unsigned int count = 0;
7320
7321 for (; nr_pages--; pfn++) {
7322 struct page *page = pfn_to_page(pfn);
7323
7324 count += page_count(page) != 1;
7325 __free_page(page);
7326 }
7327 WARN(count != 0, "%d pages are still in use!\n", count);
7328}
7329#endif
7330
7331#ifdef CONFIG_MEMORY_HOTPLUG
7332
7333
7334
7335
7336void __meminit zone_pcp_update(struct zone *zone)
7337{
7338 unsigned cpu;
7339 mutex_lock(&pcp_batch_high_lock);
7340 for_each_possible_cpu(cpu)
7341 pageset_set_high_and_batch(zone,
7342 per_cpu_ptr(zone->pageset, cpu));
7343 mutex_unlock(&pcp_batch_high_lock);
7344}
7345#endif
7346
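/*
 * Free the zone's per-cpu pagesets and fall back to the boot pageset,
 * folding any remaining per-cpu vmstat deltas first.  Used when a zone is
 * emptied, e.g. on memory offline.
 */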
7347void zone_pcp_reset(struct zone *zone)
7348{
7349 unsigned long flags;
7350 int cpu;
7351 struct per_cpu_pageset *pset;
7352
7353
7354 local_irq_save(flags);
7355 if (zone->pageset != &boot_pageset) {
7356 for_each_online_cpu(cpu) {
7357 pset = per_cpu_ptr(zone->pageset, cpu);
7358 drain_zonestat(zone, pset);
7359 }
7360 free_percpu(zone->pageset);
7361 zone->pageset = &boot_pageset;
7362 }
7363 local_irq_restore(flags);
7364}
7365
7366#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in [start_pfn, end_pfn) must have been isolated before calling
 * this: the pages are removed from the buddy free lists and marked
 * PageReserved so that the underlying memory can be offlined.
 */
7371void
7372__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7373{
7374 struct page *page;
7375 struct zone *zone;
7376 unsigned int order, i;
7377 unsigned long pfn;
7378 unsigned long flags;
7379
7380 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7381 if (pfn_valid(pfn))
7382 break;
7383 if (pfn == end_pfn)
7384 return;
7385 zone = page_zone(pfn_to_page(pfn));
7386 spin_lock_irqsave(&zone->lock, flags);
7387 pfn = start_pfn;
7388 while (pfn < end_pfn) {
7389 if (!pfn_valid(pfn)) {
7390 pfn++;
7391 continue;
7392 }
7393 page = pfn_to_page(pfn);
7394
7395
7396
7397
7398 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7399 pfn++;
7400 SetPageReserved(page);
7401 continue;
7402 }
7403
7404 BUG_ON(page_count(page));
7405 BUG_ON(!PageBuddy(page));
7406 order = page_order(page);
7407#ifdef CONFIG_DEBUG_VM
7408 pr_info("remove from free list %lx %d %lx\n",
7409 pfn, 1 << order, end_pfn);
7410#endif
7411 list_del(&page->lru);
7412 rmv_page_order(page);
7413 zone->free_area[order].nr_free--;
7414 for (i = 0; i < (1 << order); i++)
7415 SetPageReserved((page+i));
7416 pfn += (1 << order);
7417 }
7418 spin_unlock_irqrestore(&zone->lock, flags);
7419}
7420#endif
7421
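/* Return true if @page lies within a free buddy block of any order. */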
7422bool is_free_buddy_page(struct page *page)
7423{
7424 struct zone *zone = page_zone(page);
7425 unsigned long pfn = page_to_pfn(page);
7426 unsigned long flags;
7427 unsigned int order;
7428
7429 spin_lock_irqsave(&zone->lock, flags);
7430 for (order = 0; order < MAX_ORDER; order++) {
7431 struct page *page_head = page - (pfn & ((1 << order) - 1));
7432
7433 if (PageBuddy(page_head) && page_order(page_head) >= order)
7434 break;
7435 }
7436 spin_unlock_irqrestore(&zone->lock, flags);
7437
7438 return order < MAX_ORDER;
7439}
7440