/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
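/*
 * Illustrative sketch (not part of the original header): with the default
 * MAX_ORDER of 11 and 4 KiB pages, the largest buddy allocation is of
 * order MAX_ORDER - 1, i.e. 2^10 = 1024 contiguous pages:
 *
 *	unsigned long max_pages = MAX_ORDER_NR_PAGES;	   // 1024
 *	unsigned long max_bytes = max_pages << PAGE_SHIFT; // 4 MiB on x86-64
 */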

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: above it the page allocator retries far less
 * aggressively before failing the request.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with
	 * __free_pageblock_cma().  What is important though is that a
	 * range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than the pageblock size.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
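/*
 * Illustrative sketch (an assumption, not code from this header): the
 * iterator above is typically used to walk every buddy free list, e.g.
 * to inspect each (order, migratetype) pair of a zone:
 *
 *	unsigned int order;
 *	int type;
 *
 *	for_each_migratetype_order(order, type) {
 *		struct free_area *area = &zone->free_area[order];
 *		if (!free_area_empty(area, type))	// helper defined below
 *			...;
 *	}
 */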

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

/* Used for pages not on another list */
static inline void add_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
				  int migratetype)
{
	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
/* Used to preserve page allocation order entropy */
void add_to_free_area_random(struct page *page, struct free_area *area,
		int migratetype);
#else
static inline void add_to_free_area_random(struct page *page,
		struct free_area *area, int migratetype)
{
	add_to_free_area(page, area, migratetype);
}
#endif

/* Used for pages which are on another list */
static inline void move_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	list_move(&page->lru, &area->free_list[migratetype]);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline void del_page_from_free_area(struct page *page,
		struct free_area *area)
{
	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	area->nr_free--;
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
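/*
 * Illustrative sketch (an assumption, not code from this header): an
 * allocation path pairs these helpers roughly like so, taking the first
 * page off a free list and accounting it out of the free area:
 *
 *	struct free_area *area = &zone->free_area[order];
 *	struct page *page = get_page_from_free_area(area, migratetype);
 *
 *	if (page)
 *		del_page_from_free_area(page, area);
 */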

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the
 * kernel. So add a wild amount of padding here to ensure that they fall
 * into separate cachelines. There are very few zone structures in the
 * machine, so space consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
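/*
 * Illustrative sketch (an assumption, not code from this header): because
 * of the LRU_BASE/LRU_ACTIVE/LRU_FILE spacing above, list indices can be
 * derived arithmetically rather than with a switch, e.g.:
 *
 *	// inactive list paired with a given active list
 *	enum lru_list inactive = active - LRU_ACTIVE;
 *
 *	// file variant of a given anon list
 *	enum lru_list file = anon + LRU_FILE;
 */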

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
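/*
 * Illustrative sketch (an assumption, not code from this header): a
 * caller with a zone in hand might compare free pages against the low
 * watermark before waking kswapd, along the lines of:
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, zone_idx(zone));
 *
 * (zone_page_state() lives in <linux/vmstat.h>; the real allocator uses
 * the more careful zone_watermark_ok() helpers declared later on.)
 */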

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples:
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390, powerpc	<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by the page allocator and vm scanner to calculate all kinds of
	 * watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the pageblock
	 * migratetype. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
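/*
 * Illustrative sketch (an assumption, not code from this header): the
 * three page counts above nest, so the derived quantities are simple
 * subtractions:
 *
 *	unsigned long holes    = zone->spanned_pages - zone->present_pages;
 *	unsigned long reserved = zone->present_pages -
 *				 zone_managed_pages(zone);
 *
 * (zone_managed_pages() is defined a little further down.)
 */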

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
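/*
 * Illustrative sketch (an assumption, not code from this header): memory
 * hotplug can use the helper above to decide whether a newly added pfn
 * range overlaps an existing zone's span:
 *
 *	if (zone_intersects(zone, start_pfn, nr_pages))
 *		// [start_pfn, start_pfn + nr_pages) (half-open) touches zone
 */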

/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th
 * of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lru_lock */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay
	 * constant.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
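/*
 * Illustrative sketch (an assumption, not code from this header): summing
 * the free pages of every populated zone in the system:
 *
 *	struct zone *zone;
 *	unsigned long nr_free = 0;
 *
 *	for_each_populated_zone(zone)
 *		nr_free += zone_page_state(zone, NR_FREE_PAGES);
 */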

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
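/*
 * Illustrative sketch (an assumption, not code from this header): the
 * allocator walks a node's fallback zonelist in roughly this shape,
 * stopping at the first zone with enough free pages:
 *
 *	struct zonelist *zlist =
 *		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zlist, ZONE_NORMAL) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      zonelist_zone_idx(z), 0))
 *			break;
 *	}
 */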

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
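/*
 * Illustrative sketch (an assumption, not code from this header): on
 * x86-64, SECTION_SIZE_BITS is 27, so a section covers 128 MiB and, with
 * 4 KiB pages, PFN_SECTION_SHIFT is 27 - 12 = 15:
 *
 *	pfn_to_section_nr(0x8000) == 1;	     // pfn 32768: second section
 *	section_nr_to_pfn(1)	  == 0x8000;
 *	SECTION_ALIGN_DOWN(0x8123) == 0x8000;
 */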

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
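/*
 * Illustrative sketch (an assumption, not code from this header): the
 * section table is a two-level array, so a section number splits into a
 * root index and an offset within that root, mirroring the helper above:
 *
 *	unsigned long nr   = pfn_to_section_nr(pfn);
 *	unsigned long root = SECTION_NR_TO_ROOT(nr);
 *	unsigned long off  = nr & SECTION_ROOT_MASK;
 *	struct mem_section *ms = &mem_section[root][off];
 */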
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * from memmap - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which means PFN_SECTION_SHIFT is 18, which means
 *      that SECTION_MAP_LAST_BIT is 1<<3 here.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
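/*
 * Illustrative sketch (an assumption, not code from this header): because
 * section_mem_map encodes "memmap - section_start_pfn", SPARSEMEM's
 * pfn_to_page() reduces to an add after masking the flag bits; roughly:
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 */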

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_present pfn_valid
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */