#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>

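/* Free memory management - zoned buddy allocator. */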
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

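/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: orders at or below it should coalesce naturally
 * under reasonable reclaim pressure, while higher orders may require
 * significant reclaim effort.
 */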
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
        return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
        struct list_head free_list[MIGRATE_TYPES];
        unsigned long nr_free;
};

struct pglist_data;

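/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */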
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)      struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
        /* First 128 byte cacheline (assuming 64 bit words) */
        NR_FREE_PAGES,
        NR_LRU_BASE,
        NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE_[ANON|FILE] */
        NR_ACTIVE_ANON,
        NR_INACTIVE_FILE,
        NR_ACTIVE_FILE,
        NR_UNEVICTABLE,
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_ANON_PAGES,          /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK,
        /* Second 128 byte cacheline */
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        NR_SHMEM,               /* shmem pages (included tmpfs/GEM pages) */
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        NR_VM_ZONE_STAT_ITEMS };

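/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the LRU accounting in mm/vmstat.c.
 */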
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)

static inline int is_file_lru(enum lru_list l)
{
        return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
        return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
        return (l == LRU_UNEVICTABLE);
}

enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */

        /* Lists of pages, one per migrate type stored on the pcp-lists */
        struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
        s8 expire;
#endif
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
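        /*
         * ZONE_DMA is used when there are devices that cannot do DMA to all
         * of addressable memory.  It covers the low range of physical memory
         * that such devices can reach; its size is architecture specific
         * (e.g. the low 16 MB on x86 for ISA devices).
         */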
        ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
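        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16 MB as well as 32-bit devices
         * that can only do DMA to addresses below 4 GB.
         */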
        ZONE_DMA32,
#endif
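        /*
         * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
         * done on pages in ZONE_NORMAL if the DMA devices support transfers
         * to all addressable memory.
         */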
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
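        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space.  For example, i386
         * uses this for memory beyond the direct mapping; kernel access to
         * these pages requires a temporary kmap mapping.
         */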
        ZONE_HIGHMEM,
#endif
        ZONE_MOVABLE,
        __MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

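/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  See gfp_zone() in include/linux/gfp.h.
 *
 * ZONES_SHIFT is the number of bits needed to encode a zone index.
 */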
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone_reclaim_stat {
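        /*
         * The pageout code in vmscan.c keeps track of how many of the
         * mem/swap backed and file backed pages are referenced.
         * The higher the rotated/scanned ratio, the more valuable
         * that cache is.
         *
         * The anon LRU stats live in [0], file LRU stats in [1]
         */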
        unsigned long recent_rotated[2];
        unsigned long recent_scanned[2];

        /*
         * accumulated for batching
         */
        unsigned long nr_saved_scan[NR_LRU_LISTS];
};

struct zone {
        /* Fields commonly accessed by the page allocator */

        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];

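        /*
         * We don't know if the memory that we're going to allocate will be
         * freeable or/and it will be released eventually, so to avoid totally
         * wasting several GB of ram we must reserve some of the lower zone
         * memory (otherwise we risk to run OOM on the lower zones despite
         * there being tons of freeable ram on the higher zones).  This array
         * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
         * sysctl changes.
         */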
        unsigned long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        int node;
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
        struct per_cpu_pageset *pageset[NR_CPUS];
#else
        struct per_cpu_pageset pageset[NR_CPUS];
#endif
        /*
         * free areas of different sizes
         */
        spinlock_t lock;
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
#endif
        struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block.  See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section.
         */
        unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */


        ZONE_PADDING(_pad1_)

        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct zone_lru {
                struct list_head list;
        } lru[NR_LRU_LISTS];

        struct zone_reclaim_stat reclaim_stat;

        unsigned long pages_scanned;    /* since last reclaim */
        unsigned long flags;            /* zone flags, see below */

        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

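        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim
         * is under - it drives the swappiness decision: whether to unmap
         * mapped pages.
         *
         * Access to this field is quite racy even on uniprocessor, but it is
         * expected to average out OK.
         */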
        int prev_priority;

        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;


        ZONE_PADDING(_pad2_)

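        /*
         * wait_table                   -- the array holding the hash table
         * wait_table_hash_nr_entries   -- the size of the hash table array
         * wait_table_bits              -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people waiting
         * for a page to become available and make them runnable again when
         * possible.  Rather than having a per-page waitqueue, which would
         * consume a lot of memory, waiters are hashed into a fixed number of
         * queues; an occasional extra wakeup due to a hash collision is
         * harmless.
         */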
        wait_queue_head_t *wait_table;
        unsigned long wait_table_hash_nr_entries;
        unsigned long wait_table_bits;

        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;

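        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * spanned_pages is the total pages spanned by the zone, including
         * holes; present_pages is the amount of physical memory present,
         * i.e. spanned_pages minus holes.
         */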
        unsigned long spanned_pages;    /* total size, including holes */
        unsigned long present_pages;    /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        const char *name;
} ____cacheline_internodealigned_in_smp;

typedef enum {
        ZONE_ALL_UNRECLAIMABLE,         /* all pages pinned */
        ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
        set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
        return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
        clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
        return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
        return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}

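/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */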
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

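/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]  : Zonelist with fallback
 * [1]  : No fallback (GFP_THISNODE)
 */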
#define MAX_ZONELISTS 2

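/*
 * We cache key information from each zonelist for smaller cache footprint
 * when scanning for free pages in get_page_from_freelist():
 *
 * - z_to_n[] maps each zone in the zonelist to its node id, so zones on
 *   nodes excluded by the current task's mems_allowed can be skipped
 *   without dereferencing the zone.
 * - fullzones is a bitmap of zones recently found to be full (no free pages
 *   above the relevant watermark), so they can be skipped quickly.
 * - last_full_zap records (in jiffies) when fullzones was last cleared; the
 *   bitmap is zapped periodically so zones that have since freed pages get
 *   reconsidered.
 *
 * This cache is only used on NUMA systems, where zonelists can be long and
 * scanning them is expensive.
 */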
struct zonelist_cache {
        unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];          /* zone->nid */
        DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);      /* zone full? */
        unsigned long last_full_zap;            /* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

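/*
 * This struct contains information about a zone in a zonelist.  It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */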
struct zoneref {
        struct zone *zone;      /* Pointer to actual zone */
        int zone_idx;           /* zone_idx(zoneref->zone) */
};

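/*
 * One allocation request operates on a zonelist.  A zonelist is a list of
 * zones, the first one is the target of the allocation, the other ones are
 * fallback zones, in decreasing priority.
 *
 * If zlcache_ptr is not NULL, it is just the address of zlcache; if it is
 * NULL, there is no zlcache.  To speed the reading of the zonelist, the
 * zonerefs contain the zone index of the entry being read.  Helper functions
 * to access information given a struct zoneref are
 *
 * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()  - Return the index of the zone for an entry
 * zonelist_node_idx()  - Return the index of the node for an entry
 */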
struct zonelist {
        struct zonelist_cache *zlcache_ptr;                     /* NULL or &zlcache */
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
        struct zonelist_cache zlcache;                          /* optional ... */
#endif
};

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

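/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe its memory
 * layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */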
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct page_cgroup *node_page_cgroup;
#endif
#endif
        struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
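        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */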
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;
        int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
                     unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size,
                                     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
        return movable_zone == ZONE_HIGHMEM;
#else
        return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM ||
                (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
        return (idx == ZONE_NORMAL);
}

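/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */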
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
        return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
               (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
                zone_movable_is_highmem());
#else
        return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
        return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
        return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16      /* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))

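/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */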
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))

#define for_each_populated_zone(zone)                   \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))                    \
                if (!populated_zone(zone))              \
                        ; /* do nothing */              \
                else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
        /* zone_to_nid not available in this context */
        return zoneref->zone->node;
#else
        return 0;
#endif
}

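/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx
 *                       within the allowed nodemask using a cursor within a
 *                       zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search.  The zoneref returned is a cursor that can be used to iterate the
 * zonelist with next_zones_zonelist by advancing it by one before calling.
 */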
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx
 *                        within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask.  The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone)
{
        return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
                                                                zone);
}

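/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones
 *                                   in a zonelist at or below a given zone
 *                                   index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */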
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a
 *                          zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
        for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
        return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

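/*
 * SECTIONS_SHIFT       #bits space required to store a section #
 *
 * PA_SECTION_SHIFT     physical address to/from section number
 * PFN_SECTION_SHIFT    pfn to/from section number
 */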
#define SECTIONS_SHIFT          (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
        ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct page_cgroup;
struct mem_section {
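        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally during early boot we encode node id of
         * the location of the section here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */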
        unsigned long section_mem_map;

        /* See declaration of similar field in struct zone */
        unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        /*
         * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer.  We use
         * the section instead.  (see memcontrol.h/page_cgroup.h about this.)
         */
        struct page_cgroup *page_cgroup;
        unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

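/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */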
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
        return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline int pfn_present(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

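/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */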
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                                 \
({                                                                      \
        unsigned long __pfn_to_nid_pfn = (pfn);                         \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));                     \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)      (1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

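/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */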
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
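/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not.  On some architectures (e.g. ARM with
 * CONFIG_ARCH_HAS_HOLES_MEMORYMODEL) the memmap backing holes may be freed,
 * so the page_zone linkages can be broken even though pfn_valid() returns
 * true.  A walker of the full memmap must then use memmap_valid_within() to
 * check that the zone and PFN linkages of the struct page are still sane.
 * This is expensive, but walkers of the full memmap are extremely rare.
 */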
int memmap_valid_within(unsigned long pfn,
                        struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
                        struct page *page, struct zone *zone)
{
        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */