/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with
	 * __free_pageblock_cma().  What is important, though, is that
	 * a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
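
/*
 * Example (editor's sketch, not part of this header's API): walking every
 * (order, migratetype) free list of a zone to recount free pages, e.g. for
 * debugging.  Assumes zone->lock is held so the lists are stable; the
 * function name is hypothetical.
 *
 *	static unsigned long count_zone_free_pages(struct zone *zone)
 *	{
 *		unsigned int order;
 *		int type;
 *		unsigned long nr = 0;
 *
 *		for_each_migratetype_order(order, type) {
 *			struct free_area *area = &zone->free_area[order];
 *			struct page *page;
 *
 *			list_for_each_entry(page, &area->free_list[type], lru)
 *				nr += 1UL << order;
 *		}
 *		return nr;
 *	}
 */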

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the
 * kernel.  So add a wild amount of padding here to ensure that they fall
 * into separate cachelines.  There are very few zone structures in the
 * machine, so space consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * The LRU list values are used arithmetically in various places: the
 * "active" and "file" properties are encoded as offsets from LRU_BASE,
 * so LRU_ACTIVE_FILE == LRU_BASE + LRU_FILE + LRU_ACTIVE.  Code that
 * composes an lru_list from its properties relies on this layout.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
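
/*
 * Example (editor's sketch): because of the arithmetic encoding above, an
 * LRU list can be composed from its properties instead of enumerated case
 * by case; the helper name is hypothetical.
 *
 *	static enum lru_list lru_list_for(bool file, bool active)
 *	{
 *		enum lru_list lru = LRU_BASE;
 *
 *		if (file)
 *			lru += LRU_FILE;
 *		if (active)
 *			lru += LRU_ACTIVE;
 *		return lru;
 *	}
 */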

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
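
/*
 * Example (editor's sketch): a coarse "is this zone under pressure?" probe
 * using the accessor macros above.  Real allocation checks should use
 * zone_watermark_ok() (declared later in this header), which also accounts
 * for the order, alloc flags and lowmem reserves; this simplified
 * comparison is illustrative only.  zone_page_state() comes from
 * linux/vmstat.h and the function name is hypothetical.
 *
 *	static bool zone_below_low_wmark(struct zone *z)
 *	{
 *		unsigned long free = zone_page_state(z, NR_FREE_PAGES);
 *
 *		return free < low_wmark_pages(z);
 *	}
 */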

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL), so we
	 * carve out the portion of memory that is needed for these
	 * devices.  The range is arch specific: parisc, ia64 and sparc
	 * use <4G, s390 uses <2G, arm uses various limits, alpha is
	 * unlimited or 0-16MB, and i386, x86_64 and most other arches
	 * use <16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that
	 * are only able to do DMA to the lower 16M but also 32 bit
	 * devices that can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations
	 * can be performed on pages in ZONE_NORMAL if the DMA devices
	 * support transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for
	 * example used by i386 to allow the kernel to address the
	 * memory beyond 900MB. The kernel will set up special mappings
	 * (page table entries on i386) for each page that the kernel
	 * needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by the page allocator and vm scanner to calculate all kinds of
	 * watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages is
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}
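
/*
 * Example (editor's sketch): walking every valid pfn of a zone using the
 * span helpers above.  pfn_valid_within(), defined near the end of this
 * header, guards against holes inside a MAX_ORDER block; the function name
 * is hypothetical.
 *
 *	static void scan_zone_pfns(struct zone *zone)
 *	{
 *		unsigned long pfn;
 *
 *		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
 *			if (!pfn_valid_within(pfn))
 *				continue;
 *			...operate on pfn_to_page(pfn)...
 *		}
 *	}
 */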

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th
 * of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe its memory
 * layout. On UMA machines there is a single pglist_data which describes the
 * whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	/* Number of non-deferred pages */
	unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
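
/*
 * Example (editor's sketch): the node-level span helpers mirror the zone
 * ones, so a whole node's pfn range can be handled the same way; the
 * function name is hypothetical.
 *
 *	static unsigned long node_spanned_pfns(pg_data_t *pgdat)
 *	{
 *		if (pgdat_is_empty(pgdat))
 *			return 0;
 *		return pgdat_end_pfn(pgdat) - pgdat->node_start_pfn;
 *	}
 */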

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		       unsigned long mark, int classzone_idx,
		       unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			    unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
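
/*
 * Example (editor's sketch): a fully reserved zone can be populated yet not
 * managed, so reclaim-style scans should filter on managed_zone() rather
 * than populated_zone():
 *
 *	if (!managed_zone(zone))
 *		continue;	... nothing for the buddy/reclaim to do ...
 */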

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
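
/*
 * Example (editor's sketch): summing free pages across all populated zones
 * with the iterator above.  zone_page_state() comes from linux/vmstat.h and
 * the function name is hypothetical.
 *
 *	static unsigned long total_free_pages(void)
 *	{
 *		struct zone *zone;
 *		unsigned long free = 0;
 *
 *		for_each_populated_zone(zone)
 *			free += zone_page_state(zone, NR_FREE_PAGES);
 *		return free;
 *	}
 */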

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
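
/*
 * Example (editor's sketch): scanning a node's fallback zonelist the way the
 * allocator does, stopping at the first zone whose low watermark is met.
 * gfp_zone() comes from linux/gfp.h; the function name is hypothetical and
 * real allocation paths pass proper alloc_flags rather than 0.
 *
 *	static struct zone *find_usable_zone(int nid, gfp_t gfp_mask, int order)
 *	{
 *		struct zonelist *zl = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *		enum zone_type highidx = gfp_zone(gfp_mask);
 *		struct zoneref *z;
 *		struct zone *zone;
 *
 *		for_each_zone_zonelist(zone, z, zl, highidx)
 *			if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *					      zonelist_zone_idx(z), 0))
 *				return zone;
 *		return NULL;
 *	}
 */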

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
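
/*
 * Example (editor's sketch): pfn <-> section arithmetic.  With 128MB
 * sections and 4K pages (SECTION_SIZE_BITS == 27, PAGE_SHIFT == 12),
 * PFN_SECTION_SHIFT is 15, so:
 *
 *	pfn_to_section_nr(0x48000)  == 9	(0x48000 >> 15)
 *	section_nr_to_pfn(9)        == 0x48000
 *	SECTION_ALIGN_DOWN(0x48123) == 0x48000
 *	SECTION_ALIGN_UP(0x48123)   == 0x50000
 */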

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 * To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
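
/*
 * Example (editor's sketch): how the encoding above is consumed.  Because
 * section_mem_map stores "mem_map - section_nr_to_pfn(pnum)" with flag bits
 * in the low bits, translating a pfn into its struct page is a mask plus an
 * index:
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);	(defined below)
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 *
 * which is essentially what __pfn_to_page() does under classic (non-vmemmap)
 * SPARSEMEM.
 */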

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
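
/*
 * Example (editor's sketch): the distinction between the two checks above.
 * pfn_present() only says the section was registered via memory_present();
 * pfn_valid() additionally guarantees that a memmap exists, so only then is
 * pfn_to_page() safe:
 *
 *	if (pfn_valid(pfn)) {
 *		struct page *page = pfn_to_page(pfn);
 *		... page points at a struct page, though it may not be
 *		    initialized or online; see pfn_to_online_page() ...
 *	}
 */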

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * uses this cache to avoid the search.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within a MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
1300
1301#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322bool memmap_valid_within(unsigned long pfn,
1323 struct page *page, struct zone *zone);
1324#else
1325static inline bool memmap_valid_within(unsigned long pfn,
1326 struct page *page, struct zone *zone)
1327{
1328 return true;
1329}
1330#endif
1331
1332#endif
1333#endif
1334#endif
1335