#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
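
/*
 * Worked example (illustrative, assuming the default MAX_ORDER of 11 and
 * 4KB pages): valid allocation orders run from 0 to MAX_ORDER - 1 == 10,
 * so the largest buddy block is MAX_ORDER_NR_PAGES == 1 << 10 == 1024
 * pages, i.e. 4MB of physically contiguous memory.
 */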

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: orders below it are expected to coalesce naturally
 * under reasonable reclaim pressure, orders at or above it are not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
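
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * of get_pageblock_migratetype().  The helper name is hypothetical.
 */
static inline int example_page_in_movable_block(struct page *page)
{
	/* Nonzero if the pageblock containing @page is marked movable. */
	return get_pageblock_migratetype(page) == MIGRATE_MOVABLE;
}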

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
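
/*
 * Illustrative sketch, not part of the original header: count the blocks
 * queued on every (order, migratetype) free list of a free_area[] array,
 * the way buddy-list walkers are structured.  The helper name is
 * hypothetical.
 */
static inline unsigned long example_count_free_blocks(struct free_area *areas)
{
	unsigned int order;
	int type;
	unsigned long nr = 0;
	struct list_head *pos;

	for_each_migratetype_order(order, type)
		list_for_each(pos, &areas[order].free_list[type])
			nr++;		/* one entry per free block */
	return nr;
}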

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into
 * separate cachelines.  There are very few zone structures in the machine,
 * so space consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)

static inline int is_file_lru(enum lru_list l)
{
	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
	return (l == LRU_UNEVICTABLE);
}
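
/*
 * Illustrative sketch (hypothetical helper): for_each_evictable_lru()
 * visits the four anon/file lists but skips LRU_UNEVICTABLE, so this
 * always returns 2 (LRU_INACTIVE_FILE and LRU_ACTIVE_FILE).
 */
static inline int example_nr_evictable_file_lrus(void)
{
	enum lru_list l;
	int nr = 0;

	for_each_evictable_lru(l)
		if (is_file_lru(l))
			nr++;
	return nr;
}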

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples:
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address memory beyond the
	 * direct mapping. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone in include/linux/gfp.h.
 */

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];

	/*
	 * accumulated for batching
	 */
	unsigned long		nr_saved_scan[NR_LRU_LISTS];
};

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones). This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
	int			all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct zone_lru {
		struct list_head list;
	} lru[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;
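
/*
 * Illustrative sketch, not part of the original header: reading a zone
 * counter directly from vm_stat[], roughly what vmstat's zone_page_state()
 * does elsewhere.  Per-cpu batching can leave the counter transiently
 * negative, so clamp it.  The helper name is hypothetical; a caller might
 * compare example_zone_stat(zone, NR_FREE_PAGES) against
 * min_wmark_pages(zone).
 */
static inline unsigned long example_zone_stat(struct zone *zone,
					      enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

	return x < 0 ? 0 : x;
}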

typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
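
/*
 * Illustrative sketch (hypothetical helper): the flag accessors above are
 * typically used as a try-lock, e.g. so only one task runs zone reclaim
 * against a zone at a time.
 */
static inline int example_zone_reclaim_trylock(struct zone *zone)
{
	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return 0;	/* somebody else already holds it */
	return 1;		/* caller must zone_clear_flag() when done */
}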

/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go. A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict
 * the allocations to a single node for GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The fullzones BITMAP tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap) the
 *    bitmap was zero'd.
 * 2) The z_to_n[] array maps each zone in the zonelist to its node id,
 *    so the scan can cheaply check whether that node is set in the
 *    current task's mems_allowed.
 *
 * Both are indexed by a zone's offset in the zonelist, letting the scan
 * loop skip zones that were recently full or sit on disallowed nodes
 * without touching the zone structures themselves.  The fullzones state
 * is zapped about once per second so stale "full" information does not
 * keep a zone out of consideration for long.
 *
 * If no zonelist cache is available for a zonelist (e.g. on a non-NUMA
 * build), zlcache_ptr is NULL and every zone is simply examined.
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are:
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;	/* NULL or &zlcache */
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;		/* optional */
#endif
};

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
	enum zone_type classzone_idx;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
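
/*
 * Illustrative sketch (hypothetical helper): for_each_populated_zone()
 * makes summing a field over every zone that actually has pages a
 * one-liner.
 */
static inline unsigned long example_total_present_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone->present_pages;
	return total;
}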

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that is used as the next starting
 * point for future calls.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
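
/*
 * Illustrative sketch, not part of the original header: how an
 * allocator-style caller might use the iterator, returning the first
 * populated zone at or below @highidx.  The helper name is hypothetical.
 */
static inline struct zone *example_first_usable_zone(struct zonelist *zlist,
						enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zlist, highidx)
		if (populated_zone(zone))
			return zone;
	return NULL;
}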

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
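
/*
 * Worked example (illustrative; assumes SECTION_SIZE_BITS == 27 and
 * PAGE_SHIFT == 12, the x86_64 values): PFN_SECTION_SHIFT == 15, so each
 * section spans PAGES_PER_SECTION == 32768 pages == 128MB, and e.g.
 * pfn_to_section_nr(0x12345) == 0x12345 >> 15 == 2.
 */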

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * section. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}
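
/*
 * Illustrative sketch (hypothetical helper): scan for the first present
 * section, roughly the shape of early SPARSEMEM setup loops.  Returns
 * NR_MEM_SECTIONS if no section is present.
 */
static inline unsigned long example_first_present_section_nr(void)
{
	unsigned long nr;

	for (nr = 0; nr < NR_MEM_SECTIONS; nr++)
		if (present_section_nr(nr))
			return nr;
	return NR_MEM_SECTIONS;
}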

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
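
/*
 * How the pieces above combine (sketch; the real __pfn_to_page() for
 * SPARSEMEM lives in asm-generic/memory_model.h):
 *
 *	__section_mem_map_addr(__pfn_to_section(pfn)) + pfn
 *
 * This works because section_mem_map is stored biased by the section's
 * start pfn, so indexing the decoded pointer with the full pfn lands on
 * the right struct page.
 */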

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then
 * we need to check pfn validity within that block.  pfn_valid_within()
 * should be used in this case; we optimise this away when we have no holes
 * within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */