#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>


/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. It separates allocation orders which should
 * coalesce naturally under reasonable reclaim pressure from those
 * which will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

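/*
 * A quick worked example of the constants above (assuming the default
 * MAX_ORDER of 11 and a 4KiB PAGE_SIZE; both are configuration and
 * architecture dependent): the largest buddy block has order
 * MAX_ORDER - 1 = 10, so MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages,
 * i.e. 4MiB of physically contiguous memory. Orders above
 * PAGE_ALLOC_COSTLY_ORDER (larger than 1 << 3 = 8 pages, 32KiB here)
 * are the ones the allocator treats as expensive to satisfy.
 */
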
enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA is designed to mimic the way ZONE_MOVABLE works:
	 * only movable pages can be allocated from MIGRATE_CMA
	 * pageblocks, and the page allocator never implicitly changes
	 * the migration type of a MIGRATE_CMA pageblock.
	 *
	 * To use it, change the migratetype of a range of pageblocks
	 * to MIGRATE_CMA. The range must be aligned to
	 * MAX_ORDER_NR_PAGES so that buddy merging can never create a
	 * free block spanning pageblocks of different migratetypes.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* Printable names of the migratetypes, defined in mm/page_alloc.c. */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
	return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
					MIGRATETYPE_MASK);
}
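
/*
 * Worked example of the mask arithmetic above, assuming the usual
 * pageblock flag layout from linux/pageblock-flags.h where
 * PB_migrate = 0 and PB_migrate_end = 2: NR_MIGRATETYPE_BITS is
 * 2 - 0 + 1 = 3, so MIGRATETYPE_MASK is (1UL << 3) - 1 = 0x7 and a
 * pageblock can encode migratetypes 0..7, comfortably covering
 * MIGRATE_TYPES. The BUILD_BUG_ON above pins exactly this layout.
 */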

struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};
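
/*
 * A hedged sketch of how these fields fit together (the helper name
 * below is invented for illustration, not part of this header): the
 * buddy allocator keeps one free_area per order and, within it, one
 * list per migratetype, so walking every free block of a zone is a
 * double loop:
 *
 *	static unsigned long count_free_blocks(struct zone *zone)
 *	{
 *		unsigned int order;
 *		int type;
 *		unsigned long blocks = 0;
 *
 *		for_each_migratetype_order(order, type) {
 *			struct free_area *area = &zone->free_area[order];
 *			struct list_head *entry;
 *
 *			list_for_each(entry, &area->free_list[type])
 *				blocks++;
 *		}
 *		return blocks;
 *	}
 *
 * Real callers must hold zone->lock while walking these lists.
 */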

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the
 * kernel. So add a wild amount of padding here to ensure that they
 * fall into separate cachelines. There are very few zone structures
 * in the memory layout, so the space consumed by this padding is not
 * an issue.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ALLOC_BATCH,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
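
/*
 * Usage sketch (scan_file_list() and scan_anon_list() are hypothetical
 * helpers, shown only to illustrate the idea): the arithmetic encoded
 * in enum lru_list lets callers derive list properties instead of
 * switching on every value, e.g. a reclaim-style walk over the
 * evictable lists:
 *
 *	enum lru_list lru;
 *
 *	for_each_evictable_lru(lru) {
 *		if (is_file_lru(lru))
 *			scan_file_list(lruvec, lru);
 *		else
 *			scan_anon_list(lruvec, lru);
 *	}
 *
 * mm/vmscan.c follows this pattern when scanning lruvecs.
 */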

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1].
	 */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t inactive_age;
#ifdef CONFIG_MEMCG
	struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean pages */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
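
/*
 * Hedged sketch of how the watermarks are consumed (a simplification
 * of zone_watermark_ok(), declared further down, ignoring
 * lowmem_reserve and per-order accounting; zone_page_state() is from
 * linux/vmstat.h). Each zone keeps WMARK_MIN < WMARK_LOW < WMARK_HIGH:
 *
 *	bool roughly_ok(struct zone *z)
 *	{
 *		return zone_page_state(z, NR_FREE_PAGES) >
 *		       low_wmark_pages(z);
 *	}
 *
 * kswapd is woken when a zone drops below WMARK_LOW and reclaims until
 * WMARK_HIGH is restored; allocations that fall below WMARK_MIN enter
 * direct reclaim.
 */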

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then
	 * we carve out the portion of memory that is needed for these
	 * devices. The range is arch specific; for example it is
	 * 0-16MB on i386 to cover ISA devices.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that
	 * are only able to do DMA to the lower 16MB and devices that
	 * can only reach the lower 4GB; ZONE_DMA32 covers the latter.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations
	 * can be performed on pages in ZONE_NORMAL if the DMA devices
	 * support transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for
	 * example used by i386 to allow the kernel to address memory
	 * beyond 900MB: the kernel sets up special mappings (page
	 * table entries on i386) for each page it needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate
	 * will be freeable or/and it will be released eventually, so
	 * to avoid totally wasting several GB of RAM we must reserve
	 * some of the lower zone memory (otherwise we risk running OOM
	 * on the lower zones despite there being tons of freeable RAM
	 * in the higher zones). This array is recalculated at runtime
	 * if the sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU. Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	struct pglist_data *zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

	/*
	 * This is a per-zone reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long totalreserve_pages;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long *pageblock_flags;
#endif

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone,
	 * including holes:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone,
	 * which is spanned_pages - absent_pages (pages in holes).
	 *
	 * managed_pages is present pages managed by the buddy system,
	 * which is present_pages - reserved_pages (pages reserved by
	 * the memblock and bootmem allocators).
	 *
	 * present_pages can be used by memory hotplug or memory power
	 * management logic to find unmanaged pages as
	 * (present_pages - managed_pages); managed_pages is what the
	 * page allocator and vm scanner should use when calculating
	 * watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by
	 * span_seqlock. It is a seqlock because it has to be read
	 * outside of zone->lock, which happens in the main allocator
	 * path, but it is written quite infrequently.
	 *
	 * Write access to present_pages at runtime should be protected
	 * by mem_hotplug_begin/end(), as should any reader that cannot
	 * tolerate drift.
	 *
	 * Reads of managed_pages are safe because it is an unsigned
	 * long; writes to it and to totalram_pages are protected by
	 * managed_page_count_lock at runtime.
	 */
	unsigned long managed_pages;
	unsigned long spanned_pages;
	unsigned long present_pages;

	const char *name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an
	 * incorrect freepage counting problem due to racy retrieval
	 * of migratetype. Protected by zone->lock.
	 */
	unsigned long nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t span_seqlock;
#endif

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. Sleepers are hashed by the
	 * page they wait on, so a hash collision costs at most a
	 * spurious wake-up: waiters recheck the page state after
	 * waking (see wait_on_page_bit() and friends in mm/filemap.c).
	 */
	wait_queue_head_t *wait_table;
	unsigned long wait_table_hash_nr_entries;
	unsigned long wait_table_bits;

	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long flags;

	/* Write-intensive fields used from the page allocator */
	spinlock_t lock;

	ZONE_PADDING(_pad2_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t lru_lock;
	struct lruvec lruvec;

	/*
	 * When free pages are below this point, additional steps are
	 * taken when reading the number of free pages, to avoid
	 * per-cpu counter drift allowing watermarks to be breached.
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool compact_blockskip_flush;
#endif

	bool contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum zone_flags {
	ZONE_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,	/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,		/* zone has many dirty pages backed by
				 * a congested BDI
				 */
	ZONE_DIRTY,		/* reclaim scanning has recently found
				 * many dirty file pages at the tail
				 * of the LRU
				 */
	ZONE_WRITEBACK,		/* reclaim scanning has recently found
				 * many pages under writeback
				 */
	ZONE_FAIR_DEPLETED,	/* fair zone policy batch depleted */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
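
/*
 * Usage sketch (hypothetical caller): these helpers are the canonical
 * way to range-check a pfn against a zone before touching its
 * struct page. Note the half-open interval: zone_end_pfn() is one
 * past the last spanned pfn.
 *
 *	if (zone_spans_pfn(zone, pfn)) {
 *		struct page *page = pfn_to_page(pfn);
 *		...
 *	}
 *
 * A spanned pfn may still sit in a hole; callers that can see holes
 * combine this with pfn_valid_within()/pfn_valid(), defined near the
 * end of this header.
 */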

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan
 * in one go. A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists
	 * that restrict the allocations to a single node for
	 * __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is
 * stored here to avoid dereferences into large structures and lookups
 * of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist is a list
 * of zones: the first one is the 'goal' of the allocation, the other
 * zones are fallback zones, in decreasing priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone
 * index of the entry being read. Helper functions to access
 * information given a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe its
 * memory layout.
 *
 * Memory statistics and page replacement data structures are
 * maintained on a per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages or node_spanned_pages to stay constant.
	 * Holding this will also guarantee that any pfn_valid() stays
	 * that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided
	 * to manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_max_order;
	enum zone_type classzone_idx;
#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then
	 * this is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
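
/*
 * Worked example (hypothetical numbers): a node whose memory starts at
 * pfn 0x80000 and spans 0x40000 pages ends at pfn 0xC0000, exclusive,
 * so the node's pfns are [node_start_pfn, pgdat_end_pfn()).
 * zone_end_pfn() above follows the same half-open convention.
 */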

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL
 * zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;

	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable; for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index
 * that is within the allowed nodemask, using a cursor as the starting
 * point for the search. The zoneref returned is a cursor that
 * represents the current zone being examined. It should be advanced by
 * one before calling next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is passed back in this parameter
 *
 * This function returns the first zone at or below a given zone index
 * that is within the allowed nodemask. The zoneref returned is a
 * cursor that can be used to iterate the zonelist with
 * next_zones_zonelist, by advancing it by one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
	*zone = zonelist_zone(z);
	return z;
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone
 * index and within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))			\

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn)	((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec)	((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
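
/*
 * Worked example of the section arithmetic (assuming the x86_64 values
 * SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12; both are per-arch and
 * come from asm/sparsemem.h and asm/page.h): PFN_SECTION_SHIFT is
 * 27 - 12 = 15, so each section covers PAGES_PER_SECTION = 1 << 15 =
 * 32768 pages = 128MiB, and pfn_to_section_nr(0x12345) =
 * 0x12345 >> 15 = 2. SECTION_ALIGN_UP() and SECTION_ALIGN_DOWN()
 * round a pfn to those 32768-page boundaries.
 */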

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct pages.
	 * However, it is stored with some other magic
	 * (see sparse.c::sparse_init_one_section()).
	 *
	 * Additionally, during early boot we encode the node id of the
	 * section here to guide allocation
	 * (see sparse.c::memory_present()).
	 *
	 * Making it an unsigned long at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section (see page_ext.h about this).
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store a little bit
 * of information. The remaining bits hold the encoded mem_map pointer
 * (or, before the section's memmap is set up, the node id shifted by
 * SECTION_NID_SHIFT).
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
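
/*
 * Hedged sketch of the lookup chain (the helper name is invented for
 * illustration, not part of this header): resolving a pfn to its
 * struct page under SPARSEMEM goes pfn -> section number ->
 * mem_section -> decoded mem_map base:
 *
 *	static struct page *pfn_to_page_sketch(unsigned long pfn)
 *	{
 *		struct mem_section *ms = __pfn_to_section(pfn);
 *
 *		if (!valid_section(ms))
 *			return NULL;
 *		return __section_mem_map_addr(ms) + pfn;
 *	}
 *
 * This mirrors __pfn_to_page() in asm-generic/memory_model.h: the
 * encoded base already has the section's start pfn subtracted, so
 * indexing by the full pfn yields the right page.
 */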

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata. They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init, memblocks map pfns to nids. The search is
 * expensive and this caches recent lookups. The implementation of
 * __early_pfn_to_nid() treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block,
 * then we need to check pfn validity within each such block.
 * pfn_valid_within() should be used in that case; we optimise it away
 * when there can be no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid
 * memmap associated with it or not. In FLATMEM, it is expected that
 * holes always have valid memmap as long as there are valid PFNs
 * either side of the hole. In SPARSEMEM, it is assumed that a valid
 * section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the
 * memmap is never used. The page_zone linkages are then broken even
 * though pfn_valid() returns true. A walker of the full memmap must
 * then do this additional check to ensure the memmap it is looking at
 * is sane, by making sure the zone and PFN linkages are still valid.
 * This is expensive, but walkers of the full memmap are extremely
 * rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */