#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

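/*
 * Free memory management - zoned buddy allocator.  MAX_ORDER is the largest
 * order the buddy allocator deals with: an order-n request covers 2^n
 * contiguous pages, so the biggest block is MAX_ORDER_NR_PAGES pages.
 * Architectures can override the default of 11 with
 * CONFIG_FORCE_MAX_ZONEORDER.
 */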
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

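/*
 * PAGE_ALLOC_COSTLY_ORDER is the order above which an allocation is
 * considered costly to satisfy; the allocator and its reclaim/compaction
 * retry logic are less persistent for such requests.
 */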
#define PAGE_ALLOC_COSTLY_ORDER 3

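/*
 * Page mobility types used for anti-fragmentation: free pages are grouped
 * into pageblocks of a single migratetype so that movable and unmovable
 * allocations do not mix.  The first MIGRATE_PCPTYPES types also index the
 * per-cpu free lists.
 */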
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
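	/*
	 * MIGRATE_CMA pageblocks back the Contiguous Memory Allocator.  Only
	 * movable allocations may be served from them, so CMA can later
	 * migrate those pages away and hand out the contiguous range.
	 */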
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,
#endif
	MIGRATE_TYPES
};

extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), \
			PB_migrate_end, MIGRATETYPE_MASK)

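/*
 * One free_area per allocation order: a free list per migratetype plus a
 * count of all free pages at that order.  The buddy allocator adds and
 * removes pages from these lists under zone->lock.
 */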
struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

struct pglist_data;

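/*
 * ZONE_PADDING() pads to an internode cacheline boundary so that groups of
 * fields written from different hot paths (e.g. the zone lock and free lists
 * versus the read-mostly allocator data) do not share cachelines on SMP.
 */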
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,
	NUMA_MISS,
	NUMA_FOREIGN,
	NUMA_INTERLEAVE_HIT,
	NUMA_LOCAL,
	NUMA_OTHER,
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

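/*
 * VM statistics are split between the zone and the node: zone counters track
 * allocator-level state (free pages, the per-zone LRU totals needed for
 * watermark and compaction decisions), while most reclaim and page-cache
 * accounting is kept per node.
 */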
enum zone_stat_item {
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,
	NR_MLOCK,
	NR_PAGETABLE,
	NR_KERNEL_STACK_KB,
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,
	NR_ISOLATED_FILE,
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,
	NR_DIRTIED,
	NR_WRITTEN,
	NR_KERNEL_MISC_RECLAIMABLE,
	NR_FOLL_PIN_ACQUIRED,
	NR_FOLL_PIN_RELEASED,
	NR_VM_NODE_STAT_ITEMS
};

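/*
 * The reclaim code does arithmetic on these values (see is_file_lru() and
 * is_active_lru()), so each active list must sit LRU_ACTIVE entries above its
 * inactive list and each file list LRU_FILE entries above the corresponding
 * anon list.
 */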
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
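	/*
	 * vmscan records how many pages on each LRU were scanned and how many
	 * were rotated (found referenced) during recent reclaim; the ratio is
	 * used to balance pressure between the anon ([0]) and file ([1]) LRUs.
	 */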
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

enum lruvec_flags {
	LRUVEC_CONGESTED,
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
	atomic_long_t inactive_age;
	unsigned long refaults;
	unsigned long flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)

typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)

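/*
 * Per-cpu page cache: 'count' pages are currently held on 'lists' (one list
 * per migratetype in MIGRATE_PCPTYPES) and are moved to or from the buddy
 * free lists in chunks of 'batch' pages once 'count' exceeds 'high'.
 */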
struct per_cpu_pages {
	int count;
	int high;
	int batch;
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

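/*
 * Zone types.  ZONE_DMA and ZONE_DMA32 exist on architectures where some
 * devices can only DMA into a limited part of the physical address space.
 * ZONE_NORMAL holds memory the kernel can address directly; ZONE_HIGHMEM
 * (32-bit only) holds memory that must be mapped on demand.  ZONE_MOVABLE
 * contains only movable allocations, so its ranges can be reliably migrated
 * or hot-removed.  ZONE_DEVICE covers device memory (e.g. persistent memory)
 * that has struct pages but is not managed by the buddy allocator.
 */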
enum zone_type {
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

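/*
 * The zone descriptor.  Read-mostly fields used by the allocator fast path
 * come first; the write-intensive free lists, lock and compaction state
 * follow, with ZONE_PADDING() keeping the groups in separate cachelines.
 */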
struct zone {
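	/* zone watermarks, accessed with *_wmark_pages(zone) macros */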
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

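	/*
	 * lowmem_reserve[i] is the number of pages in this zone that are kept
	 * back from allocations whose preferred zone is the (higher) zone i,
	 * so that pressure on high zones cannot exhaust scarce low memory.
	 * Tuned via the lowmem_reserve_ratio sysctl.
	 */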
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data *zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	unsigned long *pageblock_flags;
#endif

	unsigned long zone_start_pfn;

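	/*
	 * spanned_pages is the total number of pages the zone spans,
	 * including holes: zone_end_pfn - zone_start_pfn.
	 *
	 * present_pages is the number of physical pages that actually exist
	 * in the zone (spanned_pages minus holes).
	 *
	 * managed_pages is the number of present pages handed to the buddy
	 * allocator, i.e. present_pages minus boot-time reservations.  It is
	 * atomic because it can change at run time, whereas spanned/present
	 * change only under memory hotplug, protected by span_seqlock.
	 */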
	atomic_long_t managed_pages;
	unsigned long spanned_pages;
	unsigned long present_pages;

	const char *name;

#ifdef CONFIG_MEMORY_ISOLATION
	unsigned long nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	seqlock_t span_seqlock;
#endif

	int initialized;

	ZONE_PADDING(_pad1_)

	struct free_area free_area[MAX_ORDER];

	unsigned long flags;

	spinlock_t lock;

	ZONE_PADDING(_pad2_)

	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	unsigned long compact_cached_free_pfn;
	unsigned long compact_cached_migrate_pfn[2];
	unsigned long compact_init_migrate_pfn;
	unsigned long compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	bool compact_blockskip_flush;
#endif

	bool contiguous;

	ZONE_PADDING(_pad3_)

	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,
	PGDAT_WRITEBACK,
	PGDAT_RECLAIM_LOCKED,
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

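/*
 * Reclaim priority: at priority p, vmscan scans roughly 1/2^p of each LRU
 * list per pass, so DEF_PRIORITY == 12 starts at about 1/4096th of the list
 * and the window doubles each time the priority drops towards zero.
 */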
#define DEF_PRIORITY 12

#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,
#ifdef CONFIG_NUMA
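	/*
	 * On NUMA, each node keeps a second zonelist that is restricted to
	 * the node itself; it serves __GFP_THISNODE allocations.
	 */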
	ZONELIST_NOFALLBACK,
#endif
	MAX_ZONELISTS
};

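/*
 * A zoneref is one entry in a zonelist: the zone pointer plus its zone index,
 * cached so zonelist walkers can filter by zone type without touching the
 * zone itself.
 */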
struct zoneref {
	struct zone *zone;
	int zone_idx;
};

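/*
 * One allocation request operates on a zonelist: the preferred zone comes
 * first and the remaining entries are fallbacks in decreasing priority.  The
 * array is terminated by a NULL zoneref, hence the "+ 1".
 */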
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
extern struct page *mem_map;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

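/*
 * One pg_data_t per NUMA node (a single contig_page_data on UMA machines).
 * It describes the node's memory layout: its zones, the zonelists used for
 * allocation fallback, and the per-node kswapd, kcompactd and reclaim state.
 */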
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages;
	unsigned long node_spanned_pages;

	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;

	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif

	unsigned long totalreserve_pages;

#ifdef CONFIG_NUMA
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif

	ZONE_PADDING(_pad1_)
	spinlock_t lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	unsigned long first_deferred_pfn;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

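	/*
	 * The node-wide lruvec used when memory cgroups are disabled.  With
	 * CONFIG_MEMCG each cgroup carries its own lruvecs, and this one must
	 * be looked up through mem_cgroup_lruvec() rather than used directly.
	 */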
	struct lruvec __lruvec;

	unsigned long flags;

	ZONE_PADDING(_pad2_)

	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

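/*
 * zone_idx() returns the index of a zone within its node's node_zones[]
 * array: 0 for the lowest zone (e.g. ZONE_DMA) up to MAX_NR_ZONES - 1.
 */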
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)

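/*
 * Returns true when the zone has pages managed by the buddy allocator.
 * Reclaim and compaction decisions should use this rather than
 * populated_zone(), which also counts pages that were reserved early in boot
 * and never handed to the allocator.
 */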
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map

#else

#include <asm/mmzone.h>

#endif

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

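/* Iterate over every online node's pg_data_t. */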
#define for_each_online_pgdat(pgdat) \
	for (pgdat = first_online_pgdat(); \
	     pgdat; \
	     pgdat = next_online_pgdat(pgdat))

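/*
 * Iterate over every zone of every online node.  Callers that only care
 * about zones with memory should use for_each_populated_zone() below.
 */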
#define for_each_zone(zone) \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone; \
	     zone = next_zone(zone))

#define for_each_populated_zone(zone) \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone; \
	     zone = next_zone(zone)) \
		if (!populated_zone(zone)) \
			; \
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

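/*
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx
 * within the allowed nodemask, using a cursor into a zonelist as a starting
 * point.  The common case of no nodemask and a suitable current entry is
 * handled inline; everything else falls back to __next_zones_zonelist().
 */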
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

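/*
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx
 * within the allowed nodemask.  The returned zoneref is a cursor that can be
 * advanced with next_zones_zonelist(); its zone may be NULL if no suitable
 * zone exists.
 */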
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}

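/*
 * for_each_zone_zonelist_nodemask - iterate over the valid zones in a
 * zonelist at or below a given zone index and within a nodemask (a NULL
 * nodemask allows all nodes).  This is the iterator the page allocator uses
 * to walk its fallback zones.
 */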
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone; \
		z = next_zones_zonelist(++z, highidx, nodemask), \
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone; \
		zone; \
		z = next_zones_zonelist(++z, highidx, nodemask), \
			zone = zonelist_zone(z))

#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn) (0)
#endif

#ifdef CONFIG_SPARSEMEM

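/*
 * SPARSEMEM splits physical memory into fixed-size sections of
 * 2^SECTION_SIZE_BITS bytes (an architecture-defined constant from
 * <asm/sparsemem.h>).  Each section has a struct mem_section that carries its
 * slice of the memory map and its pageblock flags.
 */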
#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)

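/*
 * Sub-sections (2MiB, i.e. 1UL << SUBSECTION_SHIFT bytes) allow ZONE_DEVICE
 * memory to be added and mapped at a finer granularity than a full section.
 */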
#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
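	/*
	 * Logically this is a pointer to the section's array of struct page.
	 * The low bits, however, encode the SECTION_* state flags below, and
	 * during early boot the node id is stashed here as well (see
	 * SECTION_NID_SHIFT), so it must only be accessed through helpers
	 * that mask with SECTION_MAP_MASK.
	 */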
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *page_ext;
	unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT 1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);

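/*
 * The low bits of section_mem_map encode section state: a section is marked
 * present once memory_present() has seen it, gains SECTION_HAS_MEM_MAP once
 * its memory map is allocated, and SECTION_IS_ONLINE / SECTION_IS_EARLY track
 * hotplug state and boot-time initialisation respectively.
 */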
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_IS_ONLINE (1UL<<2)
#define SECTION_IS_EARLY (1UL<<3)
#define SECTION_MAP_LAST_BIT (1UL<<4)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT 3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return 0;
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}

#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn) \
({ \
	unsigned long __pfn_to_nid_pfn = (pfn); \
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
})
#else
#define pfn_to_nid(pfn) (0)
#endif

#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
#else /* !CONFIG_SPARSEMEM */
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

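/*
 * Cache of the last pfn-to-nid lookup performed during early memory init, so
 * repeated early_pfn_to_nid() calls for neighbouring pfns can skip rescanning
 * the early node map.
 */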
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

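/*
 * pfn_valid_within() is used where a MAX_ORDER_NR_PAGES block of pages may
 * contain holes (CONFIG_HOLES_IN_ZONE): it checks each pfn individually, and
 * compiles down to 1 on architectures that guarantee hole-free blocks.
 */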
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
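/*
 * On architectures with CONFIG_ARCH_HAS_HOLES_MEMORYMODEL the memory map may
 * itself have holes, so pfn_valid() alone does not guarantee that the struct
 * page for a pfn is usable.  memmap_valid_within() checks that @page really
 * maps @pfn and lies within @zone before it is used.
 */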
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */