#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: the allocator tries much harder to satisfy requests
 * below this order than above it.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
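	/*
	 * MIGRATE_CMA pageblocks back the Contiguous Memory Allocator
	 * (CMA).  Only movable pages may be allocated from them, and the
	 * page allocator never implicitly changes the migratetype of a
	 * MIGRATE_CMA pageblock, so the region stays available for large
	 * contiguous allocations.
	 */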
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;
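/*
 * zone->lock and the pgdat lru_lock are two of the hottest locks in the
 * kernel, so add a generous amount of padding to keep them in separate
 * cachelines.  There are very few zone structures in a machine, so the
 * space consumed here is not a concern.
 */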
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};
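/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the node_stat_item statistics above
 * and with the descriptions in vmstat_text in mm/vmstat.c.
 */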
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
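	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */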
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
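	/*
	 * ZONE_DMA is used when there are devices that cannot do DMA to
	 * all of addressable memory (ZONE_NORMAL).  The portion of memory
	 * reachable by those devices is carved out into this zone; the
	 * range is architecture specific (e.g. the first 16MB on x86 for
	 * ISA devices).
	 */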
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
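	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M as well as those that can
	 * only DMA to the first 4GB of memory.
	 */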
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
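	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */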
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
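	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */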
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;
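	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */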
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;
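	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */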
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving migratetype
	 * of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * inside a struct zonelist
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};
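/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */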
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
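/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */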
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
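	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */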
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);
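/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */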
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}
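/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */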
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
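	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */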
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the section_mem_map pointer to store a little
 * bit of information: the present and has-mem_map flags below.  The
 * remaining bits hold the encoded mem_map pointer (and, early in boot,
 * the node id shifted by SECTION_NID_SHIFT).
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * will use this cache to avoid the memblock search.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block.  pfn_valid_within() should be
 * used in this case; we optimise this away when we have no holes within a
 * MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not.  However, some architectures (e.g. ARM) free
 * the memmap backing holes to save memory, so a walker of the full memmap
 * must use memmap_valid_within() to check that the memmap entry for a pfn
 * really belongs to the page and zone it appears to.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */