#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: orders above it are unlikely to be satisfied without
 * significant reclaim/compaction effort.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
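
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * for_each_migratetype_order() is initialising every free list of a zone,
 * much like mm/page_alloc.c does when a zone is set up:
 *
 *	unsigned int order, t;
 *	for_each_migratetype_order(order, t) {
 *		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 *		zone->free_area[order].nr_free = 0;
 *	}
 */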

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * Pad the structure so that they end up on separate cachelines; there are
 * few zones per machine, so the space overhead is negligible.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,	/* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables;
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (including tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
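
/*
 * Illustrative sketch (not from the original header): walking the LRU lists
 * of a zone, e.g. summing the evictable pages. This relies on the enum
 * ordering described above so that NR_LRU_BASE + lru indexes the matching
 * zone_stat_item; zone_page_state() comes from <linux/vmstat.h>, not here.
 *
 *	enum lru_list lru;
 *	unsigned long nr = 0;
 *	for_each_evictable_lru(lru)
 *		nr += zone_page_state(zone, NR_LRU_BASE + lru);
 */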

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate inactive pages */
#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
/* Isolate active pages */
#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
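
/*
 * Illustrative sketch (not from the original header): the *_wmark_pages()
 * helpers are normally paired with zone_watermark_ok(), declared later in
 * this file, to decide whether a zone may satisfy an allocation. The
 * classzone_idx and alloc_flags values come from the caller, and
 * allocate_from() is only a placeholder:
 *
 *	if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *			      classzone_idx, alloc_flags))
 *		allocate_from(zone);
 */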

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that cannot do DMA to all
	 * of addressable memory (ZONE_NORMAL). The range covered is
	 * architecture specific (e.g. the first 16MB on i386/x86_64, all
	 * memory on some other architectures).
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two DMA zones because it supports devices that can
	 * only DMA to the lowest 16MB as well as devices that can only
	 * reach the first 4GB of memory.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mappings set up on demand (e.g. kmap on i386, which uses it to
	 * reach memory beyond roughly 896MB).
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller passes hints to the allocator
 * in the gfp_mask; the zone modifier bits there select which of the
 * zones a page may come from, so ZONES_SHIFT must be wide enough to
 * encode MAX_NR_ZONES.
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1].
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, extra steps are taken when
	 * reading the number of free pages to avoid per-cpu counter drift
	 * allowing watermarks to be breached.
	 */
	unsigned long percpu_drift_mark;

	/*
	 * Pages reserved per lower zone: we cannot know whether memory we
	 * are about to allocate will ever be freed, so protect lower zones
	 * from being exhausted by allocations that could have been placed
	 * in higher zones.  Recalculated when sysctl_lowmem_reserve_ratio
	 * changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

	/*
	 * Per-zone reserve of pages that should not be considered
	 * dirtyable memory.
	 */
	unsigned long		dirty_balance_reserve;

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
	int			all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions are
	 * skipped before trying again; compact_considered counts the
	 * attempts since the last failure.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct lruvec		lruvec;

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * Target ratio of ACTIVE_ANON to INACTIVE_ANON pages on this
	 * zone's LRU, maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table			-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits		-- wait_table_size == (1 << wait_table_bits)
	 *
	 * Rather than giving every page its own waitqueue, tasks waiting
	 * on a page (e.g. for PG_locked to clear) sleep on one of these
	 * hashed waitqueues; unlock_page() wakes the matching queue and
	 * the woken tasks re-check their page, thundering-herd style.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are protected by
	 * span_seqlock (a seqlock because they are read in the allocator
	 * fast path outside zone->lock but written only rarely, e.g. by
	 * memory hotplug).
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
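
/*
 * Illustrative sketch (not from the original header): the flag helpers are
 * thin atomic test/set/clear wrappers, used e.g. the way page reclaim marks
 * a zone congested and later clears the flag:
 *
 *	zone_set_flag(zone, ZONE_CONGESTED);
 *	...
 *	if (zone_is_reclaim_congested(zone))
 *		zone_clear_flag(zone, ZONE_CONGESTED);
 */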

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict
 * allocations to a single node for GFP_THISNODE:
 *
 * [0] : zonelist with fallback
 * [1] : no fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache footprint
 * when scanning for free pages in get_page_from_freelist():
 *
 * 1) The fullzones bitmap tracks which zones in a zonelist recently came
 *    up short of free memory, so repeated scans can skip them quickly.
 *
 * 2) The z_to_n[] array maps each zonelist entry to its node id, so a
 *    scan can cheaply check whether the zone lies in a node allowed by
 *    the current task's mems_allowed.
 *
 * Both are indexed by zonelist entry (not zone number). last_full_zap
 * records, in jiffies, when fullzones was last cleared, so that zones
 * that were only briefly full get retried periodically.
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist is a list of
 * zones, the first one is the 'goal' of the allocation, the other zones
 * are fallback zones, in decreasing priority.
 *
 * If zlcache_ptr is not NULL, it is just the address of zlcache, as
 * explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are:
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     /* NULL or &zlcache */
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     /* optional */
#endif
};

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines) to denote a higher-level memory node than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t describing its memory
 * layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
	enum zone_type classzone_idx;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
	pg_data_t *__pgdat = NODE_DATA(nid);\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
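
/*
 * Illustrative sketch (not from the original header): walking every page
 * frame a node spans with the helpers above.  inspect_page() is only a
 * placeholder; pfn_valid()/pfn_to_page() come from the memory model
 * definitions later in this file and in <asm/page.h>.
 *
 *	unsigned long pfn;
 *	for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++)
 *		if (pfn_valid(pfn))
 *			inspect_page(pfn_to_page(pfn));
 */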

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/* Returns 1 if the zone contains pages, 0 otherwise */
static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
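
/*
 * Illustrative sketch (not from the original header): the iterators above
 * are used for global walks over memory, e.g. summing the free pages of
 * every zone that actually contains pages.  zone_page_state() is declared
 * in <linux/vmstat.h>, not here.
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *	for_each_populated_zone(zone)
 *		free += zone_page_state(zone, NR_FREE_PAGES);
 */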

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx
 * within the allowed nodemask using a cursor within a zonelist as a
 * starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor representing the current zone
 * being examined; it should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx
 * within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that
 * is within the allowed nodemask. The zoneref returned is a cursor that can
 * be used to iterate the zonelist with next_zones_zonelist, advancing it by
 * one before each call.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones
 * in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index
 * and within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))	\

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a
 * zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
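
/*
 * Illustrative sketch (not from the original header): the page allocator's
 * core loop walks a zonelist roughly this way, trying each candidate zone
 * at or below the allowed index.  try_alloc_from() and the local variables
 * are placeholders for what get_page_from_freelist() does in
 * mm/page_alloc.c, which also varies the watermark via alloc_flags:
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      zone_idx(preferred_zone), alloc_flags))
 *			return try_alloc_from(zone);
 *	}
 */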

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
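
/*
 * Worked example (not from the original header), assuming PAGE_SHIFT == 12
 * and SECTION_SIZE_BITS == 27 (128MB sections, as on x86_64):
 *
 *	PFN_SECTION_SHIFT = 27 - 12 = 15, so PAGES_PER_SECTION = 32768
 *	pfn_to_section_nr(0x48000)  = 0x48000 >> 15 = 9
 *	section_nr_to_pfn(9)        = 9 << 15       = 0x48000
 *	SECTION_ALIGN_DOWN(0x48123) = 0x48000
 */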

struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section's location here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it an unsigned long at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * the section instead. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif
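
/*
 * Illustrative note (not from the original header): under SPARSEMEM,
 * pfn_valid() reduces to "does the section covering this pfn have a
 * mem_map", and a valid pfn is turned into its struct page roughly as:
 *
 *	if (pfn_valid(pfn))
 *		page = __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
 *
 * which is essentially what the SPARSEMEM __pfn_to_page() does in
 * <asm-generic/memory_model.h>.
 */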

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block.  pfn_valid_within() should
 * be used in this case; we optimise it away when there can be no holes
 * within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell whether a given PFN has valid
 * memmap associated with it or not.  In FLATMEM, it is expected that holes
 * always have valid memmap as long as there are valid PFNs either side of
 * the hole.  In SPARSEMEM, it is assumed that a valid section has a memmap
 * for the entire section.
 *
 * However, ARM (and maybe other embedded architectures in the future) frees
 * memmap backing holes to save memory on the assumption the memmap is never
 * used.  The page_zone linkages are then broken even though pfn_valid()
 * returns true.  A walker of the full memmap must then do this additional
 * check to ensure the memmap it is looking at is sane by verifying that the
 * zone and PFN linkages are still valid.  This is expensive, but walkers of
 * the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */