#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
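/*
 * Illustrative note (not part of the original header): orders run from
 * 0 to MAX_ORDER - 1, so the largest buddy block is MAX_ORDER_NR_PAGES.
 * With the default MAX_ORDER of 11 and 4 KiB pages:
 *
 *	unsigned long pages = MAX_ORDER_NR_PAGES;	(1 << 10 == 1024 pages)
 *	unsigned long bytes = pages << PAGE_SHIFT;	(4 MiB)
 */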

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order above which an allocation is
 * deemed costly to satisfy: the allocator no longer retries such
 * requests indefinitely, so callers must be prepared for failure.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of MIGRATE_CMA
	 * pageblocks.  This keeps the pages migratable, so the whole
	 * range can be handed out when a contiguous (CMA) allocation
	 * needs it.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

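/*
 * Illustrative usage sketch (not from the original header): walking every
 * (order, migratetype) free list of a zone, e.g. to count the free blocks
 * queued on it.  Assumes a valid struct zone with zone->lock held; buddy
 * pages are linked on the free lists through page->lru.
 *
 *	unsigned int order, t;
 *	unsigned long blocks = 0;
 *	struct page *page;
 *
 *	for_each_migratetype_order(order, t)
 *		list_for_each_entry(page,
 *				&zone->free_area[order].free_list[t], lru)
 *			blocks++;
 */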
extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;	/* number of free blocks of this order */
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the
 * kernel.  So add a padding to separate them out onto different
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,	/* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE ahead of
 * the inactive lists.  Keep this order in sync with the statistics
 * in enum zone_stat_item above and with vmstat_text in mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
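
/*
 * Illustrative note (not part of the original header): the fixed layout
 * makes list-to-statistic translation pure arithmetic.  For example,
 * reading the length of an LRU list from the zone counters (a sketch;
 * zone_page_state() is from linux/vmstat.h):
 *
 *	static unsigned long lru_len(struct zone *zone, enum lru_list lru)
 *	{
 *		return zone_page_state(zone, NR_LRU_BASE + lru);
 *	}
 *
 * E.g. LRU_ACTIVE_FILE (0 + 2 + 1 == 3) maps to NR_ACTIVE_FILE.
 */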

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_MEMCG
	struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean pages */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
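
/*
 * Illustrative sketch (not part of the original header): the page
 * allocator compares a zone's free pages against these marks
 * (classzone_idx and alloc_flags are assumed locals here), e.g.
 *
 *	if (!zone_watermark_ok(z, order, low_wmark_pages(z),
 *			       classzone_idx, alloc_flags))
 *		wakeup_kswapd(z, order, classzone_idx);
 *
 * Falling below WMARK_LOW wakes kswapd, which balances the zone back
 * up to WMARK_HIGH; WMARK_MIN is where allocations stall for direct
 * reclaim.  Both functions are declared later in this header.
 */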

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples:
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid
	 * totally wasting several GB of ram we must reserve some of the
	 * lower zone memory (otherwise we risk running OOM on the lower
	 * zones despite there being tons of freeable ram on the higher
	 * zones).  This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long lowmem_reserve[MAX_NR_ZONES];

	/*
	 * This is a per-zone reserve of pages that should not be
	 * considered dirtyable memory.
	 */
	unsigned long dirty_balance_reserve;

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t lock;
	int all_unreclaimable;	/* All pages pinned */
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool compact_blockskip_flush;

	/* pfns where the compaction free and migration scanners restart */
	unsigned long compact_cached_free_pfn;
	unsigned long compact_cached_migrate_pfn;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t span_seqlock;
#endif
	struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t lru_lock;
	struct lruvec lruvec;

	unsigned long pages_scanned;	/* since last reclaim */
	unsigned long flags;		/* zone flags, see below */

	/* Zone statistics */
	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and free_area_init_core() in
	 * mm/page_alloc.c initializes them.
	 */
	wait_queue_head_t *wait_table;
	unsigned long wait_table_hash_nr_entries;
	unsigned long wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data *zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages).  And managed_pages should be
	 * used by the page allocator and vm scanner to calculate all kinds
	 * of watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seqlock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * the memory hotplug lock; any reader who can't tolerate drift of
	 * present_pages should hold it to get a stable value.
	 *
	 * Read access to managed_pages is safe because it's an unsigned
	 * long; writes should go through adjust_managed_page_count().
	 */
	unsigned long spanned_pages;
	unsigned long present_pages;
	unsigned long managed_pages;

	/*
	 * rarely used fields:
	 */
	const char *name;
} ____cacheline_internodealigned_in_smp;
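
/*
 * Illustrative note (not part of the original header): the three page
 * counts above nest, so the derived quantities are simple differences:
 *
 *	unsigned long holes    = zone->spanned_pages - zone->present_pages;
 *	unsigned long reserved = zone->present_pages - zone->managed_pages;
 */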

typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
	ZONE_TAIL_LRU_DIRTY,		/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	ZONE_WRITEBACK,			/* reclaim scanning has recently found
					 * many pages under writeback
					 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_dirty(const struct zone *zone)
{
	return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
}

static inline int zone_is_reclaim_writeback(const struct zone *zone)
{
	return test_bit(ZONE_WRITEBACK, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}
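
/*
 * Illustrative sketch (not part of the original header): walking the pfn
 * range a zone spans with the helpers above.  pfn_valid_within(), defined
 * near the end of this header, guards against holes inside MAX_ORDER
 * blocks on the architectures that can have them.
 *
 *	unsigned long pfn;
 *
 *	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
 *		if (!pfn_valid_within(pfn))
 *			continue;
 *		... operate on pfn_to_page(pfn) ...
 *	}
 */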

/*
 * The "priority" of VM scanning is how much of the queues we will scan
 * in one go.  A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an initial
 * aggressive scan.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * A NUMA node keeps two zonelists: one ordered for fallback across all
 * nodes, and one containing only this node's zones for __GFP_THISNODE
 * allocations.
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap) we
 *    zeroed fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node id,
 *    so that we can efficiently skip zones on nodes the current
 *    allocation is not allowed to use.
 *
 * Both caches let the allocator skip over an unsuitable zone with a
 * single cheap check instead of dereferencing the zone and recomputing
 * watermarks.  The fullzones bitmap is periodically zapped so that a
 * zone that frees up memory is reconsidered.
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * inside a zonelist so the allocator does not have to look up the zone for
 * each entry when scanning.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		/* NULL or &zlcache */
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			/* optional ... */
#endif
};

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_MEMCG
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	nodemask_t reclaim_nodes;	/* Nodes allowed to reclaim from */
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
	enum zone_type classzone_idx;
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * Lock serializing the per destination node NUMA-balancing
	 * migration rate limiting window.
	 */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
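
/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): a node's pfn span follows directly from the fields
 * above, so a containment check mirroring zone_spans_pfn() would be
 *
 *	static bool pgdat_spans_pfn(pg_data_t *pgdat, unsigned long pfn)
 *	{
 *		return pfn >= pgdat->node_start_pfn &&
 *		       pfn < pgdat_end_pfn(pgdat);
 *	}
 *
 * Like the zone version, this says nothing about holes within the span.
 */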

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	/* memcg lruvecs are not embedded in a zone, so keep a back-pointer */
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* UMA: the whole machine is described by a single pg_data_t */
extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
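
/*
 * Illustrative usage sketch (not from the original header): summing the
 * buddy-managed pages of every populated zone in the system.
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone->managed_pages;
 */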

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that is used as the next starting
 * point for future calls.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
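
/*
 * Illustrative sketch (not part of the original header): the heart of
 * the allocator's zone selection is a walk like this over a node's
 * zonelist, taking the first zone whose watermark still holds.
 * node_zonelist() and gfp_zone() are from linux/gfp.h; nid, gfp_mask,
 * order and alloc_flags are assumed locals.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, node_zonelist(nid, gfp_mask),
 *			       gfp_zone(gfp_mask)) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      zonelist_zone_idx(z), alloc_flags))
 *			break;	   (allocate from this zone)
 *	}
 */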

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
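
/*
 * Illustrative arithmetic (not part of the original header): with
 * SECTION_SIZE_BITS == 27 (as on x86_64) and PAGE_SHIFT == 12,
 * PFN_SECTION_SHIFT is 15, so each section covers 32768 pages (128 MiB):
 *
 *	unsigned long sec   = pfn_to_section_nr(0x48000);  (0x48000 >> 15 == 9)
 *	unsigned long first = section_nr_to_pfn(sec);	   (9 << 15 == 0x48000)
 */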

struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_MEMCG
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * section. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 2 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
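
/*
 * Illustrative note (not part of the original header): the low bits of
 * section_mem_map carry the flags above, so readers mask them off (as
 * __section_mem_map_addr() does) and writers OR them in.  For example,
 * mm/sparse.c marks a section present during boot roughly like
 *
 *	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 *
 * The stored pointer is also biased by the section's start pfn so that
 * pfn_to_page() is a simple addition; see sparse_encode_mem_map().
 */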

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */