#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service by the page allocator; it influences reclaim and
 * compaction behaviour rather than imposing a hard limit.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes their migration type.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

struct pglist_data;
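
/*
 * Example (illustrative sketch only, not part of the kernel API surface):
 * how a caller holding zone->lock might walk every buddy free list with the
 * helpers above.  The helper name dump_free_lists and the pr_debug() output
 * are hypothetical.
 *
 *	static void dump_free_lists(struct zone *zone)
 *	{
 *		unsigned int order;
 *		int type;
 *
 *		for_each_migratetype_order(order, type) {
 *			struct free_area *area = &zone->free_area[order];
 *
 *			if (!free_area_empty(area, type))
 *				pr_debug("order %u has %s pages free\n",
 *					 order, migratetype_names[type]);
 *		}
 *	}
 */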

/*
 * Add a wild amount of padding here to ensure data fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables */

	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
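
/*
 * Example (illustrative sketch only): summing the size of the evictable LRU
 * lists of a lruvec.  lruvec_lru_size() is declared later in this header;
 * the helper name total_evictable_pages is hypothetical.
 *
 *	static unsigned long total_evictable_pages(struct lruvec *lruvec)
 *	{
 *		enum lru_list lru;
 *		unsigned long nr = 0;
 *
 *		for_each_evictable_lru(lru)
 *			nr += lruvec_lru_size(lruvec, lru, MAX_NR_ZONES);
 *		return nr;
 *	}
 */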

enum lruvec_flags {
	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
					 * backed by a congested BDI
					 */
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
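
/*
 * Example (illustrative sketch only): comparing a zone's free pages against
 * its boosted low watermark, the way a caller might decide whether reclaim
 * is needed.  zone_page_state() comes from <linux/vmstat.h>; the helper name
 * zone_below_low_wmark is hypothetical.
 *
 *	static bool zone_below_low_wmark(struct zone *zone)
 *	{
 *		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *
 *		return free < low_wmark_pages(zone);
 *	}
 */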

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not
	 * able to DMA to all of addressable memory (ZONE_NORMAL).  On
	 * architectures where this area covers the whole 32 bit address
	 * space, ZONE_DMA32 is used; ZONE_DMA is left for devices with
	 * smaller DMA addressing constraints.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address memory beyond the
	 * direct mapping.  The kernel sets up special mappings (page table
	 * entries on i386) for each page that it needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system
	 * (reserved_pages includes pages allocated by the bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end().
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of the pageblock
	 * migratetype. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
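
/*
 * Example (illustrative sketch only): walking every valid pfn a zone spans.
 * pfn_valid() is defined later in this header (or per-arch); the helper name
 * walk_zone_pfns is hypothetical.
 *
 *	static void walk_zone_pfns(struct zone *zone)
 *	{
 *		unsigned long pfn;
 *
 *		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
 *			if (!pfn_valid(pfn))
 *				continue;
 *			... operate on pfn_to_page(pfn) here ...
 *		}
 *	}
 */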

/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th
 * of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist is a list of
 * zones, the first one is the 'goal' of the allocation, the other zones are
 * fallback zones, in decreasing priority.  To speed up reading the zonelist,
 * each zoneref also caches the zone index of the entry.
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe its memory
 * layout. On UMA machines there is a single pglist_data which describes the
 * whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced
	 * by this node's node_zonelists as well as other nodes'.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay
	 * constant.  Also synchronizes pgdat->first_deferred_pfn during
	 * deferred page init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
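
/*
 * Example (illustrative sketch only): reporting the pfn span of every online
 * node.  for_each_online_node() comes from <linux/nodemask.h>, which is
 * included above; the helper name and message format are hypothetical.
 *
 *	static void dump_node_spans(void)
 *	{
 *		int nid;
 *
 *		for_each_online_node(nid)
 *			pr_info("node %d: pfns %lu-%lu, %lu pages present\n",
 *				nid, node_start_pfn(nid), node_end_pfn(nid),
 *				node_present_pages(nid));
 *	}
 */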

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These helpers are used to set up the per-zone page min values. */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
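
/*
 * Example (illustrative sketch only): printing the managed page count of
 * every populated zone in the system; the helper name and message format
 * are hypothetical.
 *
 *	static void dump_zone_sizes(void)
 *	{
 *		struct zone *zone;
 *
 *		for_each_populated_zone(zone)
 *			pr_info("node %d zone %-8s: %lu managed pages\n",
 *				zone_to_nid(zone), zone->name,
 *				zone_managed_pages(zone));
 *	}
 */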

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * The zoneref returned is a cursor that represents the current zone being
 * examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * The zoneref returned is a cursor that can be used to iterate the zonelist
 * with next_zones_zonelist by advancing it by one before calling.  When no
 * eligible zone is found, zoneref->zone is NULL (zoneref itself is never
 * NULL).
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index
 * and within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
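
/*
 * Example (illustrative sketch only): counting how many zones an allocation
 * on the current node could fall back to.  node_zonelist() and gfp_zone()
 * come from <linux/gfp.h>, numa_node_id() from <linux/topology.h>; the
 * helper name nr_fallback_zones is hypothetical.
 *
 *	static unsigned int nr_fallback_zones(gfp_t gfp_mask)
 *	{
 *		struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
 *		struct zoneref *z;
 *		struct zone *zone;
 *		unsigned int nr = 0;
 *
 *		for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
 *			nr++;
 *		return nr;
 *	}
 */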

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
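
/*
 * Example (illustrative only): the arithmetic relationship between a pfn and
 * its memory section.  With SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12
 * (common x86-64 values), PFN_SECTION_SHIFT is 15, so each section covers
 * 32768 pages (128 MiB):
 *
 *	unsigned long pfn     = 0x12345;
 *	unsigned long nr      = pfn_to_section_nr(pfn);     // pfn >> 15 == 2
 *	unsigned long base    = section_nr_to_pfn(nr);      // 2 << 15 == 0x10000
 *	unsigned long aligned = SECTION_ALIGN_DOWN(pfn);    // also 0x10000
 */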

#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);

/*
 * We use the lower bits of the mem_map pointer to store a little bit of
 * information.  The pointer is calculated as mem_map - section_nr_to_pfn(pnum)
 * and both values are at least page aligned, so several low bits are
 * guaranteed to be available for the flags below.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_IS_EARLY	(1UL<<3)
#define SECTION_MAP_LAST_BIT	(1UL<<4)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}
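
/*
 * Example (illustrative sketch only): visiting every present memory section,
 * in the style of the iteration helpers used by the sparsemem code in
 * mm/sparse.c.
 *
 *	unsigned long nr;
 *
 *	for (nr = next_present_section_nr(-1);
 *	     nr != -1UL;
 *	     nr = next_present_section_nr(nr)) {
 *		struct mem_section *ms = __nr_to_section(nr);
 *
 *		... inspect ms, e.g. valid_section(ms) or online_section(ms) ...
 *	}
 */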

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * On architectures that define CONFIG_ARCH_HAS_HOLES_MEMORYMODEL, the memmap
 * may contain holes: pfn_valid() can return true for a pfn whose memmap entry
 * does not actually describe that pfn.  memmap_valid_within() double-checks
 * that the struct page found for @pfn really belongs to @pfn and @zone.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */