#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
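/*
 * Worked example (illustrative note, not part of the original header):
 * with the default MAX_ORDER of 11, the buddy allocator manages free
 * blocks of 2^0 .. 2^(MAX_ORDER-1) pages, so the largest single
 * allocation is MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages; with 4 KiB
 * pages that is 1024 << PAGE_SHIFT == 4 MiB.
 */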

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: the page allocator and reclaim code try less hard
 * to satisfy allocations above this order.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
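/*
 * Usage sketch (illustrative, not part of the original header): visiting
 * every per-migratetype free list of a given struct zone *zone.  A real
 * caller would hold zone->lock while touching the free lists.
 *
 *	unsigned int order;
 *	int type;
 *
 *	for_each_migratetype_order(order, type)
 *		if (!list_empty(&zone->free_area[order].free_list[type]))
 *			pr_info("order %u has free blocks of migratetype %d\n",
 *				order, type);
 */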

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
	return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
					MIGRATETYPE_MASK);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ALLOC_BATCH,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 * The NR_LRU_BASE block of zone_stat_item above relies on the same
 * ordering.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}
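/*
 * Usage sketch (illustrative, not part of the original header): reclaim-
 * style code typically walks only the evictable lists and can classify
 * each one with the helpers above.
 *
 *	enum lru_list lru;
 *
 *	for_each_evictable_lru(lru)
 *		pr_info("lru %d: %s, %s\n", lru,
 *			is_file_lru(lru) ? "file" : "anon",
 *			is_active_lru(lru) ? "active" : "inactive");
 */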

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_MEMCG
	struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
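/*
 * Background (illustrative note, not part of the original header): kswapd
 * is woken when free pages fall below low_wmark_pages(zone) and sleeps
 * again once high_wmark_pages(zone) is reached; allocations that would
 * drop free pages below min_wmark_pages(zone) enter direct reclaim unless
 * they carry privileged alloc flags.  A hedged check might look like:
 *
 *	if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *			      classzone_idx, alloc_flags))
 *		... allocation can proceed without waking kswapd ...
 */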

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL).  Then
	 * we carve out the portion of memory that is needed for these
	 * devices.  The range varies by architecture; on i386 and
	 * x86_64 it is the low 16MB of memory.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that
	 * are only able to do DMA to the lower 16M as well as those
	 * that can only do DMA to the lower 4G of memory (ZONE_DMA32).
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations
	 * can be performed on pages in ZONE_NORMAL if the DMA devices
	 * support transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for
	 * example used by i386 to address memory beyond the direct
	 * mapping; the kernel sets up special mappings (kmap) for each
	 * page it needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

	/*
	 * This is a per-zone reserve of pages that should not be
	 * considered dirtyable memory.
	 */
	unsigned long		dirty_balance_reserve;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block.  See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages), while managed_pages should be
	 * used by the page allocator and vm scanner to calculate all kinds
	 * of watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end().  Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long.  Write access to zone->managed_pages and totalram_pages is
	 * protected by managed_page_count_lock at runtime.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

	/*
	 * Number of MIGRATE_RESERVE pageblocks.  Maintained only as an
	 * optimization.  Protected by zone->lock.
	 */
	int			nr_migrate_reserve_block;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of the pageblock
	 * migratetype.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people waiting
	 * for a page to become available and make them runnable again when
	 * possible.  The trouble is that this consumes a lot of space,
	 * especially when so few things wait on pages at a given time.
	 * So instead of using per-page waitqueues, we use a waitqueue hash
	 * table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd.  The cost of a collision
	 * is great, but given the expected load of the table, they should
	 * be so rare as to be outweighed by the benefits from the saved
	 * space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields; free_area_init_core() in
	 * mm/page_alloc.c performs their initialization.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Write-intensive fields used from the page allocator */
	spinlock_t		lock;

	ZONE_PADDING(_pad2_)

	/* Write-intensive fields used by page reclaim */

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct lruvec		lruvec;

	/* Evictions & activations on the inactive file list */
	atomic_long_t		inactive_age;

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long		percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again.  The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	ZONE_PADDING(_pad3_)

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum zone_flags {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
	ZONE_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	ZONE_WRITEBACK,			/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	ZONE_FAIR_DEPLETED,		/* fair zone policy batch depleted */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}
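/*
 * Usage sketch (illustrative, not part of the original header): spanning
 * only means the pfn lies inside [zone_start_pfn, zone_end_pfn); with
 * holes in the span the page may still be absent, so a pfn_valid() check
 * is still required before touching the memmap.
 *
 *	if (zone_spans_pfn(zone, pfn) && pfn_valid(pfn))
 *		page = pfn_to_page(pfn);
 */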

/*
 * The "priority" of VM scanning is how much of the queues we will scan
 * in one go.  A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict
 * the allocations to a single node for __GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (__GFP_THISNODE)
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap) we
 *    zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist's _zonerefs[] array.
 *
 * With this cache, get_page_from_freelist() can quickly skip over
 * zones that are known to be full or on nodes not allowed by the
 * current cpuset, rechecking the full ones only once in a while
 * (when last_full_zap expires).
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist.  It is stored
 * inside the zonelist itself to keep things compact.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist.  A zonelist is a list of
 * zones, the first one is the "goal" of the allocation, the other zones are
 * fallback zones, in decreasing priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache, as
 * explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read.  Helper functions to access information given
 * a struct zoneref are:
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     /* NULL or &zlcache */
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     /* optional ... */
#endif
};

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained
 * on a per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_max_order;
	enum zone_type classzone_idx;
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
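/*
 * Worked example (illustrative, not part of the original header): a node's
 * pfn range follows directly from the two fields above, e.g. a node
 * starting at pfn 0x100000 spanning 0x80000 pages ends at pfn 0x180000
 * (exclusive):
 *
 *	for (pfn = pgdat->node_start_pfn; pfn < pgdat_end_pfn(pgdat); pfn++)
 *		... pfn may still need pfn_valid() before use ...
 */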

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/* Returns true if a zone has memory */
static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#elif defined(CONFIG_HIGHMEM)
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
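/*
 * Usage sketch (illustrative, not part of the original header): summing
 * free pages over every zone that actually has memory.
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *
 *	for_each_populated_zone(zone)
 *		free += zone_page_state(zone, NR_FREE_PAGES);
 */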

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point of the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search.  The zoneref returned is a cursor that represents the current zone
 * being examined.  It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask.  The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
	*zone = zonelist_zone(z);
	return z;
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
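/*
 * Usage sketch (illustrative, not part of the original header): the
 * allocator fast path walks a zonelist like this, trying each candidate
 * zone in priority order until one can satisfy the request.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				       zonelist_zone_idx(z), alloc_flags))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */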

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
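/*
 * Worked example (illustrative, not part of the original header; the
 * numbers assume SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12, i.e.
 * 128 MiB sections of 4 KiB pages, as on x86_64):
 *
 *	PFN_SECTION_SHIFT  = 27 - 12 = 15
 *	PAGES_PER_SECTION  = 1 << 15 = 32768 pages per section
 *	pfn_to_section_nr(0x12345)  = 0x12345 >> 15 = 2
 *	SECTION_ALIGN_DOWN(0x12345) = 0x12345 & ~0x7fff = 0x10000
 */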

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_ext pointer.  We use
	 * section.  (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store a little bit of
 * information: whether the section is present at all and whether it has
 * a memmap.  During early boot the node id is stashed in the same word
 * (see SECTION_NID_SHIFT).
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
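/*
 * Simplified sketch of the flag packing (illustrative, not part of the
 * original header; the real encoding also biases the stored pointer, see
 * sparse.c::sparse_encode_mem_map()).  Struct page arrays are more than
 * 4-byte aligned, so the two low bits are free for the flags above and
 * decoding is a plain mask, as __section_mem_map_addr() shows:
 *
 *	encoded = (unsigned long)encoded_map | SECTION_MARKED_PRESENT
 *					     | SECTION_HAS_MEM_MAP;
 *	map     = (struct page *)(encoded & SECTION_MAP_MASK);
 */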

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not.  In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free the memmap backing holes to save memory on the assumption the
 * memmap is never used.  The page_zone linkages are then broken even
 * though pfn_valid() returns true.  A walker of the full memmap must then
 * do this additional check to ensure the memmap they are looking at is
 * sane by making sure the zone and PFN linkages are still valid.  This is
 * expensive, but walkers of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */