#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * minimise the number of pages that are served by multiple areas in the
 * buddy allocator and those which won't.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by
	 * __free_pageblock_cma().  What is important though is that a
	 * range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than MAX_ORDER, in order
	 * to allocate big pages via CMA.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
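
/*
 * Illustrative sketch (not part of this header's API): walking every
 * (order, migratetype) free list of a zone with the iterator above.  The
 * helper name and the assumption that the caller holds zone->lock are
 * hypothetical; struct zone and struct free_area are defined further down.
 *
 *	static unsigned long count_free_blocks(struct zone *zone)
 *	{
 *		unsigned int order;
 *		int type;
 *		unsigned long blocks = 0;
 *
 *		for_each_migratetype_order(order, type) {
 *			struct list_head *entry;
 *
 *			list_for_each(entry, &zone->free_area[order].free_list[type])
 *				blocks++;
 *		}
 *		return blocks;
 *	}
 */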

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), \
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the
 * kernel.  So add a wild amount of padding here to ensure that they fall
 * into separate cachelines.  There are very few zone structures in the
 * machine, so space consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
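
/*
 * Illustrative sketch of the index arithmetic the comment above mandates:
 * an LRU index is composed from its "file" and "active" properties, which
 * is essentially what page_lru() in mm_inline.h does.  The stand-alone
 * helper below is hypothetical.
 *
 *	static enum lru_list lru_index(bool file, bool active)
 *	{
 *		enum lru_list lru = LRU_BASE;
 *
 *		if (file)
 *			lru += LRU_FILE;
 *		if (active)
 *			lru += LRU_ACTIVE;
 *		return lru;	// file && active yields LRU_ACTIVE_FILE (3)
 *	}
 */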

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
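
/*
 * Illustrative sketch: watermarks are meant to be read through the
 * accessors above, typically to compare against the zone's free page
 * count.  Real callers use zone_watermark_ok() (declared below) rather
 * than open-coding the comparison; this hypothetical helper only shows
 * the reading pattern.
 *
 *	static bool zone_roughly_below_min(struct zone *zone)
 *	{
 *		return zone_page_state(zone, NR_FREE_PAGES) <
 *			min_wmark_pages(zone);
 *	}
 */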

struct per_cpu_pages {
	int count;	/* number of pages in the list */
	int high;	/* high watermark, emptying needed */
	int batch;	/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data *zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by the page allocator and vm scanner to calculate all kinds of
	 * watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages is
	 * protected by managed_page_count_lock at runtime.
	 */
	unsigned long managed_pages;
	unsigned long spanned_pages;
	unsigned long present_pages;

	const char *name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area free_area[MAX_ORDER];

	/* zone flags */
	unsigned long flags;

	/* Primarily protects free_area */
	spinlock_t lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool compact_blockskip_flush;
#endif

	bool contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,	/* pgdat has many dirty pages backed by
				 * a congested BDI
				 */
	PGDAT_DIRTY,		/* reclaim scanning has recently found
				 * many dirty file pages at the tail
				 * of the LRU.
				 */
	PGDAT_WRITEBACK,	/* reclaim scanning has recently found
				 * many pages under writeback
				 */
	PGDAT_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
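
/*
 * Illustrative sketch: memory-hotplug-style code uses zone_intersects() to
 * find a zone that already covers part of a new pfn range.  The helper and
 * its caller-supplied pgdat are hypothetical; only the calling pattern is
 * the point.
 *
 *	static struct zone *find_intersecting_zone(struct pglist_data *pgdat,
 *			unsigned long start_pfn, unsigned long nr_pages)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_NR_ZONES; i++) {
 *			struct zone *zone = &pgdat->node_zones[i];
 *
 *			if (zone_intersects(zone, start_pfn, nr_pages))
 *				return zone;
 *		}
 *		return NULL;
 *	}
 */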

/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th
 * of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on
 * a per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages or node_spanned_pages to stay constant.
	 * Holding this will also guarantee that any pfn_valid() stays
	 * that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		       unsigned long mark, int classzone_idx,
		       unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			    unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
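
/*
 * Illustrative sketch: a typical walker over zones that have memory.  Note
 * that for_each_populated_zone() embeds an if/else, so it composes like a
 * normal statement.  The accounting helper itself is hypothetical.
 *
 *	static unsigned long total_present_pages(void)
 *	{
 *		struct zone *zone;
 *		unsigned long total = 0;
 *
 *		for_each_populated_zone(zone)
 *			total += zone->present_pages;
 *		return total;
 *	}
 */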

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that
 * is within the allowed nodemask. The zoneref returned is a cursor that can
 * be used to iterate the zonelist with next_zones_zonelist by advancing it
 * by one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). Callers are expected to check zoneref->zone rather than the
 * zoneref pointer.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index
 * and within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
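
/*
 * Illustrative sketch of the allocator-style calling pattern: walk a node's
 * fallback zonelist and stop at the first zone passing a watermark check.
 * The helper, the fixed ZONE_NORMAL ceiling and the zeroed alloc_flags are
 * simplified placeholders for what the real page allocator does.
 *
 *	static struct zone *pick_zone(int nid, unsigned int order)
 *	{
 *		struct zonelist *zlist =
 *			&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *		struct zoneref *z;
 *		struct zone *zone;
 *
 *		for_each_zone_zonelist(zone, z, zlist, ZONE_NORMAL) {
 *			if (zone_watermark_ok(zone, order,
 *					      low_wmark_pages(zone),
 *					      zonelist_zone_idx(z), 0))
 *				return zone;
 *		}
 *		return NULL;
 *	}
 */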

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn) (0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
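
/*
 * Worked example of the arithmetic above, assuming SECTION_SIZE_BITS == 27
 * and PAGE_SHIFT == 12 (the x86_64 defaults): PFN_SECTION_SHIFT is 15, so a
 * section spans 32768 pages (128 MiB), and
 *
 *	pfn_to_section_nr(0x48000)  == 0x48000 >> 15 == 9
 *	section_nr_to_pfn(9)        == 9 << 15       == 0x48000
 *	SECTION_ALIGN_DOWN(0x48123) == 0x48000
 *	SECTION_ALIGN_UP(0x48123)   == 0x50000
 */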

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT 1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific, so
 *      in the worst case at least a few low bits are always
 *      available for the flags below.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif
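
/*
 * Illustrative sketch: pfn_valid() guards pfn_to_page() when walking a
 * possibly holey pfn range.  The walker and its callback are hypothetical;
 * walkers of MAX_ORDER blocks additionally need pfn_valid_within() (see
 * the end of this header).
 *
 *	static void walk_pfn_range(unsigned long start, unsigned long end,
 *			void (*fn)(struct page *))
 *	{
 *		unsigned long pfn;
 *
 *		for (pfn = start; pfn < end; pfn++) {
 *			if (pfn_valid(pfn))
 *				fn(pfn_to_page(pfn));
 *		}
 *	}
 */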

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn) (0)
#endif

#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive
 * and this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the
 * hole. In SPARSEMEM, it is assumed that a valid section has a memmap for
 * the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */