#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
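
/*
 * Worked example (illustrative): with the default MAX_ORDER of 11 and a
 * 4 KiB PAGE_SIZE, the largest buddy block has order MAX_ORDER - 1 = 10,
 * i.e. MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages = 4 MiB of physically
 * contiguous memory per allocation.
 */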

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA pageblocks mimic ZONE_MOVABLE: only movable pages may
	 * be allocated from them, so the area can always be migrated away
	 * and reused for a contiguous (CMA) allocation.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync with the enum above. */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
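
/*
 * Illustrative usage (not part of the upstream header): the iterator visits
 * every (order, migratetype) pair, which is how callers typically walk a
 * zone's buddy free lists, e.g. to total up the free pages:
 *
 *	struct page *page;
 *	unsigned int order, type;
 *	unsigned long free = 0;
 *
 *	for_each_migratetype_order(order, type)
 *		list_for_each_entry(page, &zone->free_area[order].free_list[type], lru)
 *			free += 1UL << order;
 *
 * (assumes "zone" is provided by the caller and zone->lock is held)
 */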

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and the pgdat lru_lock are two of the hottest locks in the
 * kernel, so pad the structures to keep them in separate cachelines.
 * There are very few zones per machine, so the space cost is negligible.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
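
/*
 * Illustrative example (not upstream code): reclaim-style code commonly
 * walks only the evictable lists and branches on the list flavour, e.g.:
 *
 *	enum lru_list lru;
 *
 *	for_each_evictable_lru(lru) {
 *		if (is_file_lru(lru))
 *			scan_file_list(lruvec, lru);	(hypothetical helper)
 *		else
 *			scan_anon_list(lruvec, lru);	(hypothetical helper)
 *	}
 *
 * for_each_evictable_lru() relies on the evictable lists preceding
 * LRU_UNEVICTABLE in enum lru_list.
 */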

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
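
/*
 * Illustrative note: the three watermarks are ordered
 * min_wmark_pages(z) <= low_wmark_pages(z) <= high_wmark_pages(z).
 * Falling below WMARK_LOW wakes kswapd, which reclaims until the zone is
 * back above WMARK_HIGH; only allocations allowed to dip into reserves may
 * proceed below WMARK_MIN.  A rough sketch (not the allocator's exact logic):
 *
 *	if (zone_page_state(z, NR_FREE_PAGES) < low_wmark_pages(z))
 *		wakeup_kswapd(z, order, classzone_idx);
 *
 * (zone_page_state() is declared in <linux/vmstat.h>.)
 */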

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that can only DMA into a
	 * small, architecture-dependent portion of the address space (for
	 * example the first 16MB on x86 for ISA-style devices).  Some
	 * architectures do not need this zone at all.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M as well as those able to
	 * reach anywhere in the lower 4G (ZONE_DMA32).
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can
	 * be performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address memory beyond 900MB.
	 * The kernel will set up special mappings (page table entries on
	 * i386) for each page that the kernel needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages), and managed_pages should be used
	 * by the page allocator and vm scanner to calculate all kinds of
	 * watermarks and thresholds.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end().  Write access to managed_pages is
	 * protected by managed_page_count_lock at runtime.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people waiting
	 * for a page to become available and make them runnable again when
	 * possible.  Waiters are hashed by the page they wait on, so
	 * unrelated wakeups are possible but harmless.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}
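
/*
 * Illustrative example: zone_end_pfn() is exclusive, so a zone starting at
 * pfn 0x100000 with spanned_pages == 0x80000 spans pfns
 * [0x100000, 0x180000); zone_spans_pfn(zone, 0x17ffff) is true while
 * zone_spans_pfn(zone, 0x180000) is false.  A spanned pfn may still sit in
 * a hole, so use pfn_valid()/pfn_valid_within() before touching its
 * struct page.
 */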

/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go. A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
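
/*
 * Illustrative note: the extra "+ 1" entry terminates the list; iteration
 * stops when zoneref->zone is NULL.  A sketch of a manual walk (the
 * for_each_zone_zonelist*() macros further down are the real interface):
 *
 *	struct zoneref *z;
 *
 *	for (z = zonelist->_zonerefs; zonelist_zone(z); z++)
 *		consider_zone(zonelist_zone(z));	(hypothetical helper)
 */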

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on
 * a per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages than this exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}
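
/*
 * Illustrative example: a zone's index is simply its offset inside its
 * node's node_zones[] array, so for any valid zone
 *
 *	zone == &zone->zone_pgdat->node_zones[zone_id(zone)]
 *
 * and zone_id(zone) equals zone_idx(zone), the macro form defined further
 * down.
 */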

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
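
/*
 * Illustrative sketch of how the watermark helpers are meant to be used
 * (the real callers live in mm/page_alloc.c and mm/vmscan.c; alloc_flags
 * values such as ALLOC_CMA come from mm/internal.h):
 *
 *	if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *			       classzone_idx, alloc_flags))
 *		give up on this zone: try the next one in the zonelist,
 *		or wake kswapd to reclaim.
 *
 * zone_watermark_ok_safe() is the variant that compensates for per-cpu
 * counter drift (see percpu_drift_mark in struct zone).
 */
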
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
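
/*
 * Illustrative usage (not upstream code): a rough way to total up free
 * pages across every zone that actually has memory:
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *
 *	for_each_populated_zone(zone)
 *		free += zone_page_state(zone, NR_FREE_PAGES);
 *
 * (zone_page_state() is declared in <linux/vmstat.h>.)
 */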

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search.  The zoneref returned is a cursor that represents the current zone
 * being examined.  It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask.  The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL).
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
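
/*
 * Illustrative sketch of the allocator-style zonelist walk (the real fast
 * path lives in get_page_from_freelist() in mm/page_alloc.c):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      zonelist_zone_idx(z), alloc_flags))
 *			break;	(this zone has enough free pages)
 *	}
 */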

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
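
/*
 * Worked example (illustrative, architecture-dependent): with
 * SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12 (e.g. x86_64 with 4 KiB
 * pages), PFN_SECTION_SHIFT == 15, so PAGES_PER_SECTION == 32768 and each
 * mem_section covers 128 MiB of physical address space.  Then
 * pfn_to_section_nr(0x100000) == 0x20 and
 * SECTION_ALIGN_DOWN(0x100123) == 0x100000.
 */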

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section's location here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There is supposed to be
 * enough room for this.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
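
/*
 * Illustrative sketch of the sparsemem pfn-to-page path (the real
 * __pfn_to_page() lives in <asm-generic/memory_model.h>):
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);
 *	struct page *page;
 *
 *	if (ms && valid_section(ms))
 *		page = __section_mem_map_addr(ms) + pfn;
 *
 * The encoded mem_map is stored such that adding the full pfn (not the
 * offset within the section) yields the right struct page.
 */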

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata.  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then
 * we need to check pfn validity within each such block.
 * pfn_valid_within() should be used in that case; we optimise it away
 * when there can be no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not.  In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the
 * hole.  In SPARSEMEM, it is assumed that a valid section has a memmap for
 * the entire section.
 *
 * However, ARM (and maybe other embedded architectures) free the memmap
 * backing holes to save memory on the assumption the memmap is never used.
 * The page_zone linkages are then broken even though pfn_valid() returns
 * true.  A walker of the full memmap must therefore also check that the
 * zone and PFN linkages of the memmap it is looking at are still valid.
 * This is expensive, but walkers of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */