#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

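/* Free memory management - zoned buddy allocator. */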
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

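/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */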
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock, so that CMA regions stay usable for contiguous
	 * allocations.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
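
/*
 * Usage sketch (illustrative, not part of this header): walking every
 * buddy free list of a zone, assuming a valid "zone" pointer and that
 * the caller holds zone->lock:
 *
 *	unsigned int order;
 *	int type;
 *	for_each_migratetype_order(order, type) {
 *		struct list_head *head =
 *			&zone->free_area[order].free_list[type];
 *		if (!list_empty(head))
 *			;	// inspect pages on this list
 *	}
 */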

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

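/* Buddy free lists: one list per migratetype at each allocation order. */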
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

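/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */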
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ALLOC_BATCH,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

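/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 */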
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
#ifdef CONFIG_MEMCG
	struct zone			*zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

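/* Per-CPU cache of order-0 pages, one instance per CPU per zone. */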
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL).  Then
	 * we carve out the portion of memory that is needed for these
	 * devices.  The range is arch specific (for example <16M on
	 * i386/x86_64, <4G on several other architectures).
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that
	 * are only able to do DMA to the lower 16M but also 32 bit
	 * devices that can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

	/*
	 * This is a per-zone reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end().  Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long.  Writes to managed_pages and totalram_pages are protected by
	 * managed_page_count_lock at runtime.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the migratetype
	 * of a pageblock.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * These track the people waiting for a page to become available.
	 * A per-page waitqueue would consume a lot of space, so a hashed
	 * table of waitqueues is used instead; collisions only cause
	 * harmless spurious wakeups.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Write-intensive fields used from the page allocator */
	spinlock_t		lock;

	ZONE_PADDING(_pad2_)

	/* Write-intensive fields used by page reclaim */

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct lruvec		lruvec;

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long		percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum zone_flags {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
	ZONE_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	ZONE_WRITEBACK,			/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	ZONE_FAIR_DEPLETED,		/* fair zone policy batch depleted */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

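/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */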
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

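/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */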
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

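/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */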
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_max_order;
	enum zone_type classzone_idx;
#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
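
/*
 * Usage sketch (illustrative, not from this header): checking whether an
 * order-0 allocation may proceed against the low watermark and kicking
 * kswapd otherwise.  Assumes a valid "zone" pointer; alloc_flags of 0
 * assumes no watermark-modifying flags are needed.
 *
 *	if (!zone_watermark_ok(zone, 0, low_wmark_pages(zone),
 *			       zone_idx(zone), 0))
 *		wakeup_kswapd(zone, 0, zone_idx(zone));
 */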
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/*
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
898
899static inline struct zone *zonelist_zone(struct zoneref *zoneref)
900{
901 return zoneref->zone;
902}
903
904static inline int zonelist_zone_idx(struct zoneref *zoneref)
905{
906 return zoneref->zone_idx;
907}
908
909static inline int zonelist_node_idx(struct zoneref *zoneref)
910{
911#ifdef CONFIG_NUMA
912
913 return zoneref->zone->node;
914#else
915 return 0;
916#endif
917}
918
919struct zoneref *__next_zones_zonelist(struct zoneref *z,
920 enum zone_type highest_zoneidx,
921 nodemask_t *nodes);
922
923
924
925
926
927
928
929
930
931
932
933
934
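/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */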
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

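/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This is intended for code that wants to iterate the zonelist.
 */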
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

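/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */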
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

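/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */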
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

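/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */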
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

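/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */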
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, an ARM version of pfn_valid is used only to check if a pfn is
 * valid RAM, and a section may span RAM and reserved memory; the memmap
 * for the reserved part may not be initialised.  memmap_valid_within()
 * checks the page->zone and page_to_pfn() linkages for such pages before
 * the memmap is trusted.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */