/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service by the page allocator and should not be retried
 * forever by the OOM machinery.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes a pageblock's migration type.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
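
/*
 * Illustrative sketch (not part of the upstream header): counting the
 * free blocks queued on every buddy free list of a zone with
 * for_each_migratetype_order().  "zone" and "nr_blocks" are
 * hypothetical locals, and zone->lock must be held by the caller.
 *
 *	unsigned int order;
 *	int type;
 *	unsigned long nr_blocks = 0;
 *	struct page *page;
 *
 *	for_each_migratetype_order(order, type)
 *		list_for_each_entry(page,
 *				&zone->free_area[order].free_list[type], lru)
 *			nr_blocks++;
 */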

struct pglist_data;

/*
 * Add a wild amount of padding here to ensure data fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_PAGETABLE,		/* used for pagetables */
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints number of anon, file and shmem THPs. But the item
 * is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part, the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE,
	 * so internally they are stored in a page-sized granularity.
	 * The byte value can be easily converted to pages,
	 * without any special handling.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code, so
 * the numbering is deliberate: inactive lists are even, their active
 * counterparts odd, and the file-backed lists sit LRU_FILE above their
 * anonymous equivalents.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
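
/*
 * Illustrative sketch (not part of the upstream header): the numbering
 * above is what makes LRU arithmetic work, e.g. a hypothetical helper
 * mapping any evictable list to its active counterpart:
 *
 *	static inline enum lru_list lru_to_active(enum lru_list lru)
 *	{
 *		return lru | LRU_ACTIVE;	(inactive lists are even)
 *	}
 */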

#define ANON_AND_FILE 2

enum lruvec_flags {
	LRUVEC_CONGESTED,	/* lruvec has many dirty pages
				 * backed by a congested BDI
				 */
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t			lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

/*
 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER, plus one additional
 * list for pageblock-order THP pages if configured.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 1
#else
#define NR_PCP_THP 0
#endif
#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))

/*
 * Width and mask used when migratetype and order are encoded in the
 * same integer, with order in the least significant bits.
 */
#define NR_PCP_ORDER_WIDTH 8
#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
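
/*
 * Illustrative sketch (not part of the upstream header): a boost-aware
 * watermark check, roughly what the allocator slow path does before
 * waking kswapd.  "zone", "gfp_mask" and "order" are hypothetical
 * locals; zone_page_state() comes from <linux/vmstat.h>.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, zone_idx(zone));
 */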

/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	short free_factor;	/* batch scaling factor during free */
#ifdef CONFIG_NUMA
	short expire;		/* When 0, remote pagesets are drained */
#endif

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[NR_PCP_LISTS];
};

struct per_cpu_zonestat {
#ifdef CONFIG_SMP
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
	s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
	/*
	 * Low priority inaccurate counters that are only folded
	 * on demand. Use a large type to avoid the overhead of
	 * folding during refresh_cpu_vm_stats.
	 */
	unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not
	 * able to DMA to all of the addressable memory (ZONE_NORMAL).
	 * Then we carve out the portion of memory that is needed for these
	 * devices.  The range is arch specific.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with few exceptional cases. The main use cases for
	 * ZONE_MOVABLE are to make memory offlining/unplug more likely to
	 * succeed, and to locally limit unmovable allocations - e.g., to
	 * increase the number of THP/huge pages. Notable exceptions include
	 * unmovable allocations made from memblock before the buddy is up,
	 * memory holes, the vmemmap of hotplugged memory, and pages that
	 * are pinned long-term (which are migrated out of ZONE_MOVABLE
	 * before pinning where possible).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pages	__percpu *per_cpu_pageset;
	struct per_cpu_zonestat	__percpu *per_cpu_zonestats;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * present_early_pages is present pages existing within the zone
	 * located on memory available since early boot, excluding hotplugged
	 * memory.
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * cma_pages is present pages that are assigned for CMA use
	 * (MIGRATE_CMA).
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
	unsigned long		present_early_pages;
#endif
#ifdef CONFIG_CMA
	unsigned long		cma_pages;
#endif

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving migratetype
	 * of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
	ZONE_RECLAIM_ACTIVE,		/* kswapd may be scanning the zone. */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
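
/*
 * Illustrative sketch (not part of the upstream header): memory-hotplug
 * style code can use zone_intersects() to find which zone of a node a
 * pfn range touches.  "pgdat", "start_pfn" and "nr_pages" are
 * hypothetical locals.
 *
 *	for (i = 0; i < MAX_NR_ZONES; i++) {
 *		struct zone *zone = pgdat->node_zones + i;
 *
 *		if (zone_intersects(zone, start_pfn, nr_pages))
 *			return zone;
 *	}
 */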

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

/*
 * The array of struct pages for flatmem.
 * It must be declared for SPARSEMEM as well because there are configurations
 * that rely on that.
 */
extern struct page *mem_map;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other node's node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLATMEM	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/done() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
	bool proactive_compact_trigger;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLATMEM
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
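
/*
 * Illustrative sketch (not part of the upstream header): testing whether
 * a pfn lies within the span of a node.  "nid" and "pfn" are
 * hypothetical locals; note the spanned range may still contain holes.
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *
 *	if (pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat))
 *		...;
 */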

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, use to differentiate memory added by
 * the platform statically or via memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NUMA

extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
	return &contig_page_data;
}
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NUMA */

#include <asm/mmzone.h>

#endif /* !CONFIG_NUMA */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
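
/*
 * Illustrative sketch (not part of the upstream header): visiting every
 * zone with present pages across all online nodes:
 *
 *	struct zone *zone;
 *
 *	for_each_populated_zone(zone)
 *		pr_info("%s: %lu managed pages\n", zone->name,
 *			zone_managed_pages(zone));
 */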

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 *
 * Return: the next zone at or below highest_zoneidx within the allowed
 * nodemask using a cursor within a zonelist as a starting point
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 *
 * Return: Zoneref pointer for the first suitable zone found
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
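
/*
 * Illustrative sketch (not part of the upstream header): walking a
 * node's fallback zonelist the way the allocator does when hunting for
 * a zone with room.  "nid", "gfp_mask" and "order" are hypothetical
 * locals; gfp_zone() comes from <linux/gfp.h>.
 *
 *	struct zonelist *zl = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zl, gfp_zone(gfp_mask))
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      gfp_zone(gfp_mask), 0))
 *			break;
 */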

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
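
/*
 * Illustrative sketch (not part of the upstream header): the helpers
 * above round-trip through the start of a section, so for any pfn:
 *
 *	section_nr_to_pfn(pfn_to_section_nr(pfn)) == SECTION_ALIGN_DOWN(pfn)
 *	SECTION_ALIGN_DOWN(pfn) == (pfn & PAGE_SECTION_MASK)
 */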

/*
 * SUBSECTION_SHIFT must be constant since it is used to declare
 * subsection_map and related bitmaps without triggering the generation
 * of variable-length arrays. The most natural size for a subsection is
 * a PMD-page. For architectures that do not have a constant PMD-size,
 * SUBSECTION_SHIFT is set to a constant amenable to the supported
 * PMD_SIZE values.
 */
#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * relative to the mem_map of the first page of a section
 * (see sparse.c::sparse_init_one_section()).  There is thus
 * a small amount of alignment that the mem_map pointer is
 * assured to have.  The current bits are:
 *
 * SECTION_MARKED_PRESENT	section has memory
 * SECTION_HAS_MEM_MAP		section has a usable mem_map
 * SECTION_IS_ONLINE		section is online
 * SECTION_IS_EARLY		section was created during early boot
 * SECTION_TAINT_ZONE_DEVICE	section has ZONE_DEVICE pages
 */
#define SECTION_MARKED_PRESENT		(1UL<<0)
#define SECTION_HAS_MEM_MAP		(1UL<<1)
#define SECTION_IS_ONLINE		(1UL<<2)
#define SECTION_IS_EARLY		(1UL<<3)
#define SECTION_TAINT_ZONE_DEVICE	(1UL<<4)
#define SECTION_MAP_LAST_BIT		(1UL<<5)
#define SECTION_MAP_MASK		(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT		6

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_device_section(struct mem_section *section)
{
	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;

	return section && ((section->section_mem_map & flags) == flags);
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
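
/*
 * Illustrative sketch (not part of the upstream header): in the classic
 * (non-vmemmap) SPARSEMEM model, __pfn_to_page() resolves a pfn through
 * its section, roughly:
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 *
 * The plain addition works because section_mem_map stores the mem_map
 * pointer already biased by the section's first pfn.
 */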

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
 * pfn_valid - check if there is a valid memory map entry for a PFN
 * @pfn: the page frame number to check
 *
 * Check if there is a valid memory map entry aka struct page for the @pfn.
 * Note, that availability of the memory map entry does not imply that
 * there is actual usable memory at that @pfn. The struct page may
 * represent a hole or an unusable page frame.
 *
 * Return: 1 for PFNs that have memory map entries and 0 otherwise
 */
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn. Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */
	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}
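
/*
 * Illustrative sketch (not part of the upstream header): iterating all
 * present sections with the helper above, similar to what mm/sparse.c
 * does with its local for_each_present_section_nr() macro:
 *
 *	unsigned long nr;
 *
 *	for (nr = next_present_section_nr((unsigned long)-1);
 *	     nr != (unsigned long)-1;
 *	     nr = next_present_section_nr(nr))
 *		...;
 */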

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */