/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  Fewer pages are reclaimed when such an allocation
 * fails, as an indication to the caller that it should be investigating
 * the cause of the expensive allocation rather than retrying hard.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync with the enum above */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
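
/*
 * Illustrative sketch, not part of the original header:
 * for_each_migratetype_order() visits every buddy free list of a zone.
 * Free pages are threaded through page->lru, so counting free blocks
 * of each size looks roughly like the following; "zone" is a
 * hypothetical pointer to a populated zone, and zone->lock must be
 * held so the lists stay stable:
 *
 *	unsigned int order;
 *	int type;
 *	unsigned long blocks = 0;
 *	struct page *page;
 *
 *	for_each_migratetype_order(order, type)
 *		list_for_each_entry(page,
 *			&zone->free_area[order].free_list[type], lru)
 *			blocks++;	(each block spans 1 << order pages)
 */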

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the
 * kernel.  So add a wild amount of padding here to ensure that they
 * fall into separate cachelines.  There are very few zone structures
 * in the machine, so space consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assumes 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is safe to say that there are multiple LRU lists.  Every LRU
 * list index is LRU_BASE plus some combination of the LRU_ACTIVE and
 * LRU_FILE offsets below, which is why the order of the lists in
 * enum lru_list must not change.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
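
/*
 * Illustrative sketch, not from the kernel sources: because the
 * evictable lists are laid out as BASE, BASE+ACTIVE, BASE+FILE,
 * BASE+FILE+ACTIVE, the list for a page can be computed rather than
 * branched on.  With "file" and "active" as hypothetical 0/1 flags:
 *
 *	enum lru_list lru = LRU_BASE + file * LRU_FILE
 *				     + active * LRU_ACTIVE;
 *
 * for_each_evictable_lru() then walks exactly those four lists,
 * relying on LRU_UNEVICTABLE sorting last in the enum.
 */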

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
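
/*
 * Illustrative sketch, not from the kernel sources: the allocator
 * compares a zone's free pages against these marks to decide whether
 * to allocate, wake kswapd, or keep reclaiming.  A simplified,
 * hypothetical check (the real logic lives in __zone_watermark_ok(),
 * which also accounts for lowmem_reserve and unusable free pages):
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, classzone_idx);
 */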

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able to
	 * do DMA to all of addressable memory (ZONE_NORMAL).  Then we
	 * carve out the portion of memory that is needed for these
	 * devices.  The range is arch specific.
	 *
	 * Some examples:
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390, powerpc	<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that
	 * are only able to do DMA to the lower 16M but also 32 bit
	 * devices that can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations
	 * can be performed on pages in ZONE_NORMAL if the DMA devices
	 * support transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for
	 * example used by i386 to allow the kernel to address the
	 * memory beyond 900MB. The kernel will set up special mappings
	 * (page table entries on i386) for each page that the kernel
	 * needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will
	 * be freeable or/and it will be released eventually, so to
	 * avoid totally wasting several GB of ram we must reserve some
	 * of the lower zone memory (otherwise we risk to run OOM on the
	 * lower zones despite there being tons of freeable ram on the
	 * higher zones).  This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone,
	 * including holes:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system:
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * The page allocator and vm scanner should use managed_pages to
	 * calculate watermarks and thresholds; hotplug and power
	 * management logic may use (present_pages - managed_pages) to
	 * find unmanaged pages.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by
	 * span_seqlock, a seqlock because they are read outside of
	 * zone->lock in the main allocator path but written quite
	 * infrequently.
	 *
	 * Write access to present_pages at runtime should be protected
	 * by mem_hotplug_begin/end().  Readers that cannot tolerate
	 * drift should use get_online_mems() to get a stable value.
	 *
	 * managed_pages is an atomic_long_t because it can be read and
	 * updated without any lock being held.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an
	 * incorrect freepage counting problem due to racy retrieval of
	 * a pageblock's migratetype. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are
	 * taken when reading the number of free pages to avoid per-cpu
	 * counter drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
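
/*
 * Illustrative arithmetic, not from the kernel sources: the span
 * helpers express the half-open range [zone_start_pfn, zone_end_pfn).
 * For a hypothetical zone starting at pfn 0x1000 with spanned_pages
 * of 0x800:
 *
 *	zone_end_pfn(zone)		== 0x1800
 *	zone_spans_pfn(zone, 0x1000)	== true   (first pfn)
 *	zone_spans_pfn(zone, 0x1800)	== false  (one past the end)
 *
 * A spanned pfn may still sit in a hole; pfn_valid() and the
 * present/managed counters make the finer distinctions.
 */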

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan
 * in one go. A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay
	 * constant.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then
	 * this is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
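
/*
 * Illustrative sketch, not from the kernel sources: a node's zones are
 * carved out of its pfn span, so walking one node's zones looks roughly
 * like the following hypothetical snippet ("nid" being a valid online
 * node id):
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	int i;
 *
 *	for (i = 0; i < MAX_NR_ZONES; i++) {
 *		struct zone *zone = &pgdat->node_zones[i];
 *
 *		if (!populated_zone(zone))
 *			continue;
 *		each populated zone spans a subrange of
 *		[pgdat->node_start_pfn, pgdat_end_pfn(pgdat))
 *	}
 */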

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
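
/*
 * Illustrative sketch, not from the kernel sources: summing present
 * pages over every populated zone in the system.  "total" is a
 * hypothetical local; note that the macro's if/else trick means a
 * dangling else around the loop needs braces in the caller.
 *
 *	struct zone *zone;
 *	unsigned long total = 0;
 *
 *	for_each_populated_zone(zone)
 *		total += zone->present_pages;
 */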

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
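
/*
 * Illustrative sketch, not from the kernel sources: a simplified
 * version of how an allocation walks its preferred zonelist.  The
 * locals are hypothetical; "highidx" would come from gfp_zone() of the
 * allocation's gfp mask, and the real fast path applies more checks:
 *
 *	struct zonelist *zlist =
 *		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zlist, highidx) {
 *		if (zone_watermark_ok(zone, order,
 *				      low_wmark_pages(zone), highidx, 0))
 *			break;	(candidate zone found)
 *	}
 */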

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
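
/*
 * Illustrative arithmetic, not from the kernel sources: with a
 * hypothetical SECTION_SIZE_BITS of 27 and 4KiB pages (PAGE_SHIFT of
 * 12), PFN_SECTION_SHIFT is 15, so each section covers 32768 pages
 * (128MiB):
 *
 *	pfn_to_section_nr(0x12345)	== 0x12345 >> 15 == 2
 *	section_nr_to_pfn(2)		== 2 << 15       == 0x10000
 *	SECTION_ALIGN_DOWN(0x12345)	== 0x10000
 *	SECTION_ALIGN_UP(0x12345)	== 0x18000
 *
 * The exact constants are architecture-specific (asm/sparsemem.h).
 */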

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store a little bit
 * of information.  The pointer is calculated as
 * mem_map - section_nr_to_pfn(pnum), which leaves the low bits below
 * free: mem_map arrays are page-aligned, and section_nr_to_pfn()
 * always clears the PFN_SECTION_SHIFT lowest bits.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
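
/*
 * Illustrative sketch (a hypothetical simplification, not the exact
 * sparse.c code): section_mem_map packs a pointer and flag bits into
 * one unsigned long, roughly:
 *
 *	ms->section_mem_map = (unsigned long)(mem_map -
 *				section_nr_to_pfn(pnum)) |
 *			SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
 *
 * __section_mem_map_addr() reverses this by masking the flag bits off,
 * and the SPARSEMEM pfn_to_page() then indexes the result by the raw
 * pfn.
 */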

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * Cache of the most recent pfn-to-nid lookup performed during early
 * memory initialisation; the underlying search is expensive, so
 * repeated lookups within the same range reuse this.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() tells whether a given PFN has a valid memmap (a struct
 * page) associated with it; it does not guarantee that the page is
 * fully initialized or online.  Some architectures allow holes with
 * valid memmap inside a MAX_ORDER_NR_PAGES block; in that case
 * memmap_valid_within() checks that the memmap entry at @pfn really
 * refers to @page within @zone.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */