/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA pageblocks back the Contiguous Memory Allocator.  They
	 * behave like ZONE_MOVABLE: only movable pages may be allocated from
	 * them, and the page allocator never implicitly changes a pageblock's
	 * migratetype to or from MIGRATE_CMA, so CMA can always reclaim the
	 * contiguous ranges it has reserved.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* Human-readable names for the migratetypes above; defined in mm/page_alloc.c */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

/* One free list per migratetype for each allocation order. */
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
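
/*
 * Illustrative sketch (not part of this header): walking a zone's buddy
 * free lists with for_each_migratetype_order().  Assumes the caller holds
 * zone->lock; the helper name count_free_pages() is hypothetical.
 *
 *	static unsigned long count_free_pages(struct zone *zone)
 *	{
 *		unsigned int order, type;
 *		unsigned long pages = 0;
 *		struct page *page;
 *
 *		for_each_migratetype_order(order, type) {
 *			struct free_area *area = &zone->free_area[order];
 *
 *			list_for_each_entry(page, &area->free_list[type], lru)
 *				pages += 1UL << order;
 *		}
 *		return pages;
 *	}
 */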

struct pglist_data;

/*
 * zone->lock and the pgdat lru_lock are two of the hottest locks in the
 * kernel, so add padding to keep such hot fields in separate cachelines
 * on SMP.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
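
/*
 * Illustrative sketch (not part of this header): iterating the evictable
 * LRU lists of a lruvec and splitting the totals by anon/file.  The helper
 * name count_evictable() is hypothetical; lruvec_lru_size() is declared
 * further down in this header, and passing MAX_NR_ZONES asks for the size
 * across all zones.
 *
 *	static void count_evictable(struct lruvec *lruvec,
 *				    unsigned long *anon, unsigned long *file)
 *	{
 *		enum lru_list lru;
 *
 *		*anon = *file = 0;
 *		for_each_evictable_lru(lru) {
 *			unsigned long size;
 *
 *			size = lruvec_lru_size(lruvec, lru, MAX_NR_ZONES);
 *			if (is_file_lru(lru))
 *				*file += size;
 *			else
 *				*anon += size;
 *		}
 *	}
 */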

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Masks used for gathering several LRU counters at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
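
/*
 * Illustrative sketch (not part of this header): a much simplified version
 * of the check the page allocator performs against a zone's low watermark.
 * zone_page_state() comes from <linux/vmstat.h>; the function name
 * zone_below_low_wmark() is hypothetical.  The real check is
 * __zone_watermark_ok(), declared later in this header, which also accounts
 * for the allocation order, lowmem reserves and per-migratetype reserves.
 *
 *	static bool zone_below_low_wmark(struct zone *zone)
 *	{
 *		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *
 *		return free < low_wmark_pages(zone);
 *	}
 */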

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that can only DMA into a
	 * small, architecture-specific part of the physical address space
	 * (for example the first 16MB on x86 for legacy ISA devices).
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * ZONE_DMA32 covers memory below 4GB for devices that can only DMA
	 * with 32-bit addresses.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space (e.g. memory beyond
	 * the direct mapping on 32-bit x86).  kmap is required to access
	 * these pages.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, accessed with the *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	/* pages reserved for high-order atomic allocations */
	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes: spanned_pages = zone_end_pfn - zone_start_pfn.
	 *
	 * present_pages is physical pages existing within the zone:
	 * present_pages = spanned_pages - absent_pages (pages in holes).
	 *
	 * managed_pages is present pages managed by the buddy system:
	 * managed_pages = present_pages - reserved_pages.
	 *
	 * The page allocator and vm scanner should use managed_pages to
	 * calculate all kinds of watermarks and thresholds.
	 *
	 * Locking rules: zone_start_pfn and spanned_pages are protected by
	 * span_seqlock, which is a seqlock because it has to be read outside
	 * of zone->lock in the main allocator path but is written very
	 * infrequently.  Runtime writes to present_pages should be protected
	 * by mem_hotplug_begin/end().
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  Used to solve an incorrect
	 * freepage count caused by racy retrieval of a pageblock's
	 * migratetype.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached.
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again.  The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	/* true if the zone's memmap spans the zone with no holes */
	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
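
/*
 * Illustrative sketch (not part of this header): the helpers above make
 * range checks against a zone straightforward.  The function name
 * pfn_range_in_zone() is hypothetical.
 *
 *	static bool pfn_range_in_zone(struct zone *zone,
 *				      unsigned long start_pfn,
 *				      unsigned long nr_pages)
 *	{
 *		return nr_pages &&
 *		       zone_spans_pfn(zone, start_pfn) &&
 *		       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
 *	}
 *
 * Note that a spanned pfn only lies inside [zone_start_pfn, zone_end_pfn);
 * the span may contain holes, so a spanned pfn is not necessarily backed by
 * a struct page (see pfn_valid_within() near the end of this header).
 */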

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe its memory
 * layout.  On UMA machines there is a single pglist_data which describes the
 * whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
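
/*
 * Illustrative sketch (not part of this header): a node's pfn span can be
 * walked with the helpers above.  The function name node_count_valid_pfns()
 * is hypothetical; pfn_valid() is provided further down (SPARSEMEM) or by
 * the architecture, and not every spanned pfn is valid.
 *
 *	static unsigned long node_count_valid_pfns(pg_data_t *pgdat)
 *	{
 *		unsigned long pfn, nr = 0;
 *
 *		if (pgdat_is_empty(pgdat))
 *			return 0;
 *		for (pfn = pgdat->node_start_pfn; pfn < pgdat_end_pfn(pgdat); pfn++)
 *			if (pfn_valid(pfn))
 *				nr++;
 *		return nr;
 *	}
 */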

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

/*
 * zone_idx() returns the index of a zone within its node's node_zones[]
 * array (0 for the lowest zone).
 */
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
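
/*
 * Illustrative sketch (not part of this header): walking the zones of one
 * node and skipping zones with nothing managed by the buddy allocator.
 * Reclaim-style code should test managed_zone() rather than
 * populated_zone(), since a zone whose present pages are all reserved is
 * populated but unusable for the allocator.  The function name
 * node_managed_pages() is hypothetical.
 *
 *	static unsigned long node_managed_pages(pg_data_t *pgdat)
 *	{
 *		unsigned long pages = 0;
 *		int i;
 *
 *		for (i = 0; i < MAX_NR_ZONES; i++) {
 *			struct zone *zone = &pgdat->node_zones[i];
 *
 *			if (!managed_zone(zone))
 *				continue;
 *			pages += zone_managed_pages(zone);
 *		}
 *		return pages;
 *	}
 */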

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/*
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These functions are used to set up the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/*
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/*
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
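
/*
 * Illustrative sketch (not part of this header): summing buddy-managed pages
 * over every populated zone in the system with for_each_populated_zone().
 * The function name system_managed_pages() is hypothetical.
 *
 *	static unsigned long system_managed_pages(void)
 *	{
 *		struct zone *zone;
 *		unsigned long pages = 0;
 *
 *		for_each_populated_zone(zone)
 *			pages += zone_managed_pages(zone);
 *		return pages;
 *	}
 */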

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/*
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx
 * within the allowed nodemask, using a cursor within a zonelist as the
 * starting point.  The returned zoneref is a cursor representing the zone
 * being examined; advance it by one before calling next_zones_zonelist()
 * again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/*
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx
 * within the allowed nodemask in a zonelist.  The returned zoneref is a
 * cursor that can be used to iterate the zonelist with next_zones_zonelist().
 *
 * When no eligible zone is found, zoneref->zone is NULL (the zoneref itself
 * is never NULL).  This may happen either genuinely, or due to a concurrent
 * cpuset modification of the nodemask.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/*
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones
 * in a zonelist at or below a given zone index and within a nodemask
 * @zone: the current zone in the iterator
 * @z: the current zoneref cursor within zonelist->_zonerefs
 * @zlist: the zonelist being iterated
 * @highidx: the zone index of the highest zone to return
 * @nodemask: nodemask allowed by the allocator
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/*
 * for_each_zone_zonelist - same as for_each_zone_zonelist_nodemask, but
 * without a nodemask filter.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
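
/*
 * Illustrative sketch (not part of this header): walking the fallback
 * zonelist of the current node, roughly the way the page allocator
 * considers candidate zones.  node_zonelist() and gfp_zone() come from
 * <linux/gfp.h>, numa_node_id() from <linux/topology.h>; the function
 * name first_usable_zone() is hypothetical.
 *
 *	static struct zone *first_usable_zone(gfp_t gfp_mask)
 *	{
 *		struct zonelist *zonelist;
 *		struct zoneref *z;
 *		struct zone *zone;
 *
 *		zonelist = node_zonelist(numa_node_id(), gfp_mask);
 *		for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 *			if (managed_zone(zone))
 *				return zone;
 *		}
 *		return NULL;
 *	}
 */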

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
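
/*
 * Illustrative sketch (not part of this header): pfn <-> section arithmetic.
 * Assuming SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12 (x86_64 defaults),
 * PFN_SECTION_SHIFT is 15, so each section covers 32768 pages (128MB):
 *
 *	pfn_to_section_nr(0x48000)  == 0x48000 >> 15 == 9
 *	section_nr_to_pfn(9)        == 9 << 15       == 0x48000
 *	SECTION_ALIGN_DOWN(0x48123) == 0x48000
 *	SECTION_ALIGN_UP(0x48123)   == 0x50000
 */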

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct pages.
	 * However, it is stored with some other magic: during early boot
	 * the node id of the section is encoded here to guide allocation,
	 * and the low bits carry the SECTION_* flags defined below (see
	 * sparse.c).  Making it an unsigned long at least makes someone
	 * do a cast before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * With SPARSEMEM, pgdat doesn't have a page_ext pointer; the
	 * section carries it instead (see page_ext.h).
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * The lower bits of the mem_map pointer are used to store a little bit of
 * information.  The pointer is calculated as mem_map - section_nr_to_pfn(pnum)
 * and is aligned to at least 2^PFN_SECTION_SHIFT, so the low bits are free
 * for the flags below.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}
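
/*
 * Illustrative sketch (not part of this header): checking every section
 * backing a pfn range, e.g. before treating the range as having a valid
 * memmap.  The function name pfn_range_has_memmap() is hypothetical.
 *
 *	static bool pfn_range_has_memmap(unsigned long start_pfn,
 *					 unsigned long nr_pages)
 *	{
 *		unsigned long sec = pfn_to_section_nr(start_pfn);
 *		unsigned long last = pfn_to_section_nr(start_pfn + nr_pages - 1);
 *
 *		for (; sec <= last; sec++)
 *			if (sec >= NR_MEM_SECTIONS || !valid_section_nr(sec))
 *				return false;
 *		return true;
 *	}
 */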

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else /* !CONFIG_SPARSEMEM */
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block.  pfn_valid_within() should be
 * used in this case; it is optimised away when there can be no holes within a
 * MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() tells whether a given PFN has a struct page backing it, not
 * whether that page is fully initialized or online.  SPARSEMEM assumes a
 * valid section has a memmap for the whole section, but some architectures
 * (notably ARM with memblock-based pfn_valid()) leave holes whose memmap
 * entries are not initialized, so memmap_valid_within() checks the actual
 * page within the memmap.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */