#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
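
/*
 * Illustrative note (editor's sketch, not part of the original header):
 * with the default MAX_ORDER of 11 the buddy allocator keeps free lists
 * for orders 0..10, so the largest block is MAX_ORDER_NR_PAGES =
 * 1 << (11 - 1) = 1024 pages -- 4MiB with a 4KiB PAGE_SIZE, since
 * 1024 << PAGE_SHIFT == 1024 * 4096 == 4194304 bytes.
 */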

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by
	 * __free_pageblock_cma().  What is important though is that a
	 * range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
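
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * walking every buddy free list of a zone, e.g. to count free pages for a
 * debug printout.  Assumes zone->lock is held so the lists are stable;
 * each list entry is a block of 1 << order pages, linked via page->lru:
 *
 *	unsigned int order;
 *	int type;
 *	unsigned long nr_pages = 0;
 *	struct page *page;
 *
 *	for_each_migratetype_order(order, type)
 *		list_for_each_entry(page, &zone->free_area[order].free_list[type], lru)
 *			nr_pages += 1UL << order;
 */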

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)
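
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * get_pageblock_migratetype() reads the migratetype stored in the
 * pageblock bitmap, so callers can branch on how a block is meant to be
 * used, e.g.:
 *
 *	int mt = get_pageblock_migratetype(page);
 *
 *	if (!is_migrate_movable(mt))
 *		return false;	-- only MOVABLE/CMA blocks qualify
 */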

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * The definitions below encode this: an active list is its inactive
 * counterpart plus LRU_ACTIVE, and a file list is its anon counterpart
 * plus LRU_FILE.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
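
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * the encoding above makes list relationships simple index arithmetic,
 * e.g. LRU_ACTIVE_FILE == 0 + 2 + 1 == 3, and:
 *
 *	enum lru_list active = inactive + LRU_ACTIVE;	-- activate a page
 *	int file = is_file_lru(lru);			-- 0 for anon, 1 for file
 */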

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
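
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * the page allocator compares free pages against these per-zone marks,
 * roughly:
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) <= low_wmark_pages(zone))
 *		wakeup_kswapd(zone, order, classzone_idx);
 *
 * i.e. dropping below WMARK_LOW wakes background reclaim, and WMARK_MIN
 * is the point where allocations must reclaim directly (or fail).
 */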

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages are
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of the migratetype
	 * of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
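
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * zone_end_pfn() is exclusive, so a zone starting at pfn 0x1000 with
 * spanned_pages == 0x800 spans [0x1000, 0x1800):
 *
 *	zone_spans_pfn(zone, 0x1000)	-> true  (first page)
 *	zone_spans_pfn(zone, 0x17ff)	-> true  (last page)
 *	zone_spans_pfn(zone, 0x1800)	-> false (== zone_end_pfn)
 */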

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
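
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * for the [0x1000, 0x1800) zone above, a hot-added range of 0x100 pages
 * starting at pfn 0xf80 overlaps the first 0x80 pages of the zone, so
 * zone_intersects(zone, 0xf80, 0x100) is true, while
 * zone_intersects(zone, 0x1800, 0x100) is false because the range begins
 * exactly at the zone's (exclusive) end pfn.
 */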

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}
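
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * because node_zones is an array embedded in pglist_data, zone_id() is
 * plain pointer arithmetic:
 *
 *	struct zone *z = &NODE_DATA(nid)->node_zones[ZONE_NORMAL];
 *
 * gives zone_id(z) == ZONE_NORMAL and z->zone_pgdat == NODE_DATA(nid).
 */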

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
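
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * a typical user skips empty zones and dumps a statistic for the rest:
 *
 *	struct zone *zone;
 *
 *	for_each_populated_zone(zone)
 *		pr_info("%s: %lu free pages\n", zone->name,
 *			zone_page_state(zone, NR_FREE_PAGES));
 */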

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
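
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * this is the core fallback walk of the page allocator -- visit every
 * zone usable for a request whose highest allowed zone is @highidx, in
 * the zonelist's (preferred first) order:
 *
 *	struct zonelist *zlist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zlist, ZONE_NORMAL) {
 *		if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				       ZONE_NORMAL, 0))
 *			continue;
 *		-- try to allocate from this zone --
 *	}
 */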

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
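
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * with SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12 (x86_64 defaults),
 * PFN_SECTION_SHIFT is 15, so each section covers 1 << 15 == 32768 pages
 * (128MiB) and:
 *
 *	pfn_to_section_nr(0x12345)	== 0x12345 >> 15 == 2
 *	section_nr_to_pfn(2)		== 2 << 15 == 0x10000
 *	SECTION_ALIGN_DOWN(0x12345)	== 0x10000
 *	SECTION_ALIGN_UP(0x12345)	== 0x18000
 */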

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
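
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * the section table is a two-level radix: SECTION_NR_TO_ROOT() picks the
 * root page, nr & SECTION_ROOT_MASK the slot within it.  Assuming a
 * 32-byte mem_section and 4KiB pages, SECTIONS_PER_ROOT is 128, so
 * section 300 lives at mem_section[2][44] (300 / 128 == 2,
 * 300 & 127 == 44).
 */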
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block. pfn_valid_within() should be
 * used in this case; we optimise this away when we have no holes within a
 * MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */