#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
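/*
 * The buddy allocator hands out blocks of 2^order pages for
 * order = 0 .. MAX_ORDER-1, so the largest possible allocation is
 * MAX_ORDER_NR_PAGES = 2^(MAX_ORDER - 1) pages.  A worked example,
 * assuming the default MAX_ORDER of 11 and 4 KiB pages:
 *
 *	MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages = 4 MiB
 */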

/* Orders above this are considered costly for the allocator to satisfy */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
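/*
 * Illustrative use of the iterator above, walking every (order,
 * migratetype) pair the way the buddy free lists are indexed (the
 * variable names here are hypothetical):
 *
 *	int order, t;
 *	unsigned long nonempty = 0;
 *	for_each_migratetype_order(order, t)
 *		nonempty += !list_empty(&zone->free_area[order].free_list[t]);
 */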

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
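/*
 * Illustrative only: when a page is freed, the buddy allocator looks
 * up the migratetype of its pageblock to decide which free list the
 * page returns to, roughly:
 *
 *	int mt = get_pageblock_migratetype(page);
 *	list_add(&page->lru, &zone->free_area[order].free_list[mt]);
 */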

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the
 * kernel.  So keep them on separate cachelines to avoid false sharing
 * between the allocator and the reclaim scanner.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
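/*
 * A minimal sketch of how the padding is meant to be used (field names
 * hypothetical): members touched by different hot paths are separated
 * so they land on different internode cachelines.
 *
 *	struct hot {
 *		spinlock_t alloc_lock;	 // allocator path
 *		ZONE_PADDING(_pad_)
 *		spinlock_t reclaim_lock; // reclaim path
 *	};
 */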

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
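/*
 * Example (illustrative): summing the pages on a zone's evictable LRU
 * lists, relying on the enum layout noted above.  zone_page_state() is
 * declared in <linux/vmstat.h>, not in this header.
 *
 *	enum lru_list l;
 *	unsigned long evictable = 0;
 *	for_each_evictable_lru(l)
 *		evictable += zone_page_state(zone, NR_LRU_BASE + l);
 */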

static inline int is_file_lru(enum lru_list l)
{
	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
	return (l == LRU_UNEVICTABLE);
}

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
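/*
 * How the three watermarks are used (summary, not normative): when an
 * allocation cannot be satisfied without dipping below the low
 * watermark, kswapd is woken and reclaims until free pages climb back
 * above high; min is the floor for ordinary allocations, which only
 * atomic-style requests may breach.  For example:
 *
 *	if (zone_watermark_ok(z, order, low_wmark_pages(z), idx, 0))
 *		... no need to wake kswapd for this zone ...
 */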

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL), so we
	 * carve out the portion of memory that is needed for these
	 * devices.  The range is arch specific: on i386 and x86-64 it
	 * is the first 16 MiB, while some architectures (e.g. s390)
	 * have no DMA limitation and do not need this zone at all.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86-64 needs two ZONE_DMAs because it supports devices that
	 * are only able to do DMA to the lower 16 MiB as well as
	 * 32-bit devices that can only DMA to addresses below 4 GiB.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations
	 * can be performed on pages in ZONE_NORMAL if the DMA devices
	 * support transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for
	 * example used by i386 to allow the kernel to address memory
	 * beyond 900 MiB: the kernel sets up special mappings (page
	 * table entries on i386) for each page it needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};
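/*
 * Note on MAX_NR_ZONES: it is not defined here.  kernel/bounds.c is
 * compiled with __GENERATING_BOUNDS_H defined (which is why most of
 * this header hides behind that guard) and emits the enum's
 * __MAX_NR_ZONES value into <generated/bounds.h> as MAX_NR_ZONES, so
 * it can be used in preprocessor arithmetic such as ZONES_SHIFT below.
 */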

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  ZONES_SHIFT must be large enough to
 * encode MAX_NR_ZONES.
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
	int			all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct zone_lru {
		struct list_head list;
	} lru[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table			-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits		-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people waiting
	 * for a page to become available and make them runnable again
	 * when possible.  Rather than one wait queue per page, a (small)
	 * hash table of wait queues is used: wait_on_page_locked() and
	 * friends hash the page to one of these queues, so an occasional
	 * collision only causes a harmless extra wakeup.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * spanned_pages is the total pages spanned by the zone,
	 * including holes; present_pages is the amount of physical
	 * pages actually present.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
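/*
 * Usage sketch for the flag helpers above (illustrative): the atomic
 * test-and-set variant lets one task claim a zone, the way
 * zone_reclaim() in mm/vmscan.c serialises itself:
 *
 *	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
 *		return;			// someone else is reclaiming
 *	do_some_reclaim(zone);		// hypothetical helper
 *	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 */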

/*
 * The "priority" of VM scanning is how much of the queues we will scan
 * in one go.  A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that
 * restrict the allocations to a single node for __GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (__GFP_THISNODE)
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The bitmap fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap) we
 *    zeroed fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that zones whose nodes are not in the current task's
 *    mems_allowed can be skipped cheaply.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist's
 * _zonerefs[] array, indexed by the same offset.  The cache is
 * optional: if zlcache_ptr is NULL the cache is not present and must
 * not be used, which is the case for the single all-node zonelist
 * used when NUMA is not configured.
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist.  It is
 * stored here to avoid dereferences into large structures and lookups
 * of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist.  A zonelist is a list
 * of zones, the first one is the 'goal' of the allocation, the other
 * zones are fallback zones, in decreasing priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 * To speed the reading of the zonelist, the zonerefs contain the zone
 * index of the entry being read.  Helper functions to access
 * information given a struct zoneref are:
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     /* NULL or &zlcache */
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     /* optional */
#endif
};

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines) to denote a higher-level memory zone than the
 * zone denotes.  On NUMA machines, each NUMA node has a pg_data_t to
 * describe its memory layout.
 *
 * Memory statistics and page replacement data structures are
 * maintained on a per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages or node_spanned_pages to stay constant.
	 * Holding this will also guarantee that any pfn_valid() stays
	 * that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
	enum zone_type classzone_idx;
} pg_data_t;
646
647#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
648#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
649#ifdef CONFIG_FLAT_NODE_MEM_MAP
650#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
651#else
652#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
653#endif
654#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
655
656#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
657
658#define node_end_pfn(nid) ({\
659 pg_data_t *__pgdat = NODE_DATA(nid);\
660 __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
661})
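/*
 * Illustrative only: a node's page frames span the half-open range
 * [node_start_pfn(nid), node_end_pfn(nid)), possibly with holes, so a
 * walk over a node might look like this (inspect_page() is a
 * hypothetical helper):
 *
 *	unsigned long pfn;
 *	for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++)
 *		if (pfn_valid(pfn))
 *			inspect_page(pfn_to_page(pfn));
 */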

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
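/*
 * Example (illustrative): totalling free pages across every populated
 * zone in the system.  zone_page_state() comes from <linux/vmstat.h>,
 * not from this header.
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *	for_each_populated_zone(zone)
 *		free += zone_page_state(zone, NR_FREE_PAGES);
 */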

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist, advancing it by one
 * before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index
 * and within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
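/*
 * A sketch of the allocator-style fallback walk these macros enable
 * (illustrative; the real user is get_page_from_freelist() in
 * mm/page_alloc.c):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (zone_watermark_ok(zone, order,
 *				      low_wmark_pages(zone),
 *				      zonelist_zone_idx(z), 0))
 *			break;	// allocate from this zone
 *	}
 */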

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
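/*
 * Worked example of the section arithmetic, assuming x86-64 values
 * (SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12, MAX_PHYSMEM_BITS = 46;
 * these come from <asm/sparsemem.h>, not from this header):
 *
 *	PFN_SECTION_SHIFT = 27 - 12 = 15
 *	PAGES_PER_SECTION = 1 << 15 = 32768 pages = 128 MiB per section
 *	NR_MEM_SECTIONS   = 1 << (46 - 27) = 524288 sections
 *
 * so pfn_to_section_nr(pfn) simply drops the low 15 bits of the pfn.
 */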

struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer.  We use
	 * section. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
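/*
 * How the encoding above is used (summary; see mm/sparse.c for the
 * authoritative version): the flag bits live in the low bits of
 * section_mem_map, and the pointer stored there is pre-biased by the
 * section's first pfn, so pfn_to_page() under SPARSEMEM reduces to:
 *
 *	struct mem_section *sec = __pfn_to_section(pfn);
 *	struct page *page = __section_mem_map_addr(sec) + pfn;
 */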

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block,
 * then we need to check pfn validity within that block.
 * pfn_valid_within() should be used in this case; we optimise it away
 * when there can be no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid
 * memmap associated with it or not.  In FLATMEM, it is expected that
 * holes always have valid memmap as long as there are valid PFNs
 * either side of the hole.  In SPARSEMEM, it is assumed that a valid
 * section has a memmap for the entire section.
 *
 * However, ARM (and maybe other embedded architectures in the future)
 * frees the memmap backing holes to save memory, on the assumption the
 * memmap is never used.  The page_zone linkages are then broken even
 * though pfn_valid() returns true.  A walker of the full memmap must
 * then do this additional check to ensure the memmap it is looking at
 * is sane by making sure the zone and PFN linkages are still valid.
 * This is expensive, but walkers of the full memmap are extremely
 * rare.
 */
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */