#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: orders above it do not coalesce naturally under
 * reasonable reclaim pressure.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA pageblocks mimic ZONE_MOVABLE: only movable pages
	 * may be allocated from them, so the pages can always be migrated
	 * out again when a driver needs the contiguous area back.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* Defined in mm/page_alloc.c; indexed by migratetype */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
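
/*
 * Example (illustrative sketch only, not part of this header): walking
 * every (order, migratetype) free list of a zone with the iterator
 * defined above.  Assumes a valid, populated struct zone *zone.
 *
 *	unsigned int order, type;
 *
 *	for_each_migratetype_order(order, type) {
 *		if (!list_empty(&zone->free_area[order].free_list[type]))
 *			pr_info("order %u type %u has free blocks\n",
 *				order, type);
 *	}
 */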

struct pglist_data;

/*
 * zone->lock and the pgdat LRU lock are two of the hottest locks in the
 * kernel.  Pad them out to separate cachelines so that they never share
 * a cacheline on SMP.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
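
/*
 * Illustration of the index arithmetic above (sketch only, not part of
 * this header): the LRU list an evictable page belongs to can be
 * computed instead of switch()ed on, e.g.
 *
 *	static inline enum lru_list lru_index(bool file, bool active)
 *	{
 *		return LRU_BASE + (file ? LRU_FILE : 0) +
 *		       (active ? LRU_ACTIVE : 0);
 *	}
 *
 * so LRU_ACTIVE_FILE == LRU_BASE + LRU_FILE + LRU_ACTIVE == 3.
 */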

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
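
/*
 * Illustrative use of the watermarks (a sketch, not kernel policy): a
 * zone is short on memory for an order-0 request once its free pages
 * drop below the low watermark, e.g.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, 0, zone_idx(zone));
 *
 * zone_page_state() is declared in <linux/vmstat.h>.  The real check is
 * done by zone_watermark_ok(), declared later in this file, which also
 * accounts for lowmem_reserve and the requested order.
 */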

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that cannot do DMA to
	 * all of addressable memory (ZONE_NORMAL).  The range covered is
	 * architecture specific: on i386/x86_64 it is the memory below
	 * 16MB for legacy ISA devices; other architectures use different
	 * limits or do not need the zone at all.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16MB as well as 32-bit devices
	 * that can only do DMA to addresses below 4GB.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can
	 * be performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space (e.g. memory above
	 * the direct mapping on i386); access requires a kmap.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end().  Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again.  The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}
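
/*
 * Relationship between the span helpers above, for illustration only:
 * a zone spans the pfn range [zone_start_pfn, zone_end_pfn()), so every
 * pfn in that range satisfies zone_spans_pfn(), e.g.
 *
 *	unsigned long pfn;
 *
 *	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++)
 *		WARN_ON(!zone_spans_pfn(zone, pfn));
 *
 * Note that a spanned pfn may still sit in a hole with no backing
 * memory; see pfn_valid_within() near the end of this file.
 */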

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t describing its memory
 * layout.  On UMA machines there is a single pglist_data which describes
 * the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif

	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on this
	 * node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
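
/*
 * Example (sketch only): reclaim-style iteration that skips zones whose
 * pages are all reserved, using managed_zone() rather than
 * populated_zone() as described above.  for_each_zone() is defined
 * later in this file.
 *
 *	struct zone *zone;
 *
 *	for_each_zone(zone) {
 *		if (!managed_zone(zone))
 *			continue;
 *		...scan or balance this zone...
 *	}
 */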

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
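
/*
 * Example (sketch only, not part of this header): totalling the pages
 * handed to the buddy allocator across all populated zones.
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone->managed_pages;
 */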

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
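
/*
 * Example (illustrative sketch only): walking the fallback zonelist of
 * the current node the way the allocator does, stopping at the first
 * zone whose low watermark is still met for an order-0 request.
 * node_zonelist() and gfp_zone() live in <linux/gfp.h>; numa_node_id()
 * in <linux/topology.h>.
 *
 *	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
 *		if (zone_watermark_ok(zone, 0, low_wmark_pages(zone),
 *				      zone_idx(zone), 0))
 *			break;
 */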

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
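
/*
 * Worked example of the section arithmetic above (illustrative values
 * only): with PAGE_SHIFT == 12 and SECTION_SIZE_BITS == 27 (x86_64),
 * PFN_SECTION_SHIFT == 15, so PAGES_PER_SECTION == 32768 pages == 128MB
 * per section, and pfn_to_section_nr(0x48000) == 9.
 */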

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section's location here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it an unsigned long at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2
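
/*
 * For illustration only: before a section's mem_map is allocated, the
 * node id is parked in the same word shifted past the flag bits, i.e.
 * section_mem_map == (nid << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT,
 * as done by memory_present() in mm/sparse.c; sparse_init() later
 * replaces it with the encoded mem_map pointer plus the flags above.
 */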

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init, memblock maps pfns to nids.  The search is expensive,
 * so this structure caches recent lookups.  The implementation of
 * __early_pfn_to_nid() may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block.  pfn_valid_within() should be
 * used in this case; we optimise it away when there are no holes within a
 * MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, on ARM, and maybe other embedded architectures in the future,
 * the memmap backing holes may be freed to save memory on the assumption
 * that the memmap is never used. The page_zone linkages are then broken
 * even though pfn_valid() returns true. A walker of the full memmap must
 * then do this additional check to ensure the memmap it is looking at is
 * sane by making sure the zone and PFN linkages are still valid. This is
 * expensive, but walkers of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */