#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <asm/atomic.h>
#include <asm/page.h>

#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
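
/*
 * Worked example (assuming a common 4 KiB PAGE_SIZE): with the default
 * MAX_ORDER of 11 the buddy allocator keeps free lists for orders 0..10,
 * so the largest contiguous allocation is MAX_ORDER_NR_PAGES = 1 << 10 =
 * 1024 pages, i.e. 4 MiB.  Architectures that need larger physically
 * contiguous blocks raise this via CONFIG_FORCE_MAX_ZONEORDER.
 */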

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service by the page allocator; requests above it are retried
 * less aggressively.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
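
/*
 * Hypothetical usage sketch (not taken from mm/): walking every buddy free
 * list of a zone, e.g. to total the free blocks per migrate type:
 *
 *	int order, t;
 *	unsigned long blocks[MIGRATE_TYPES] = { 0 };
 *
 *	for_each_migratetype_order(order, t) {
 *		struct page *page;
 *		list_for_each_entry(page, &zone->free_area[order].free_list[t], lru)
 *			blocks[t]++;
 *	}
 */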

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_INACTIVE,
	NR_ACTIVE,
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	/* Second 128 byte cacheline */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
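
/*
 * zone_pcp(__z, __cpu) resolves to the per-CPU pageset of zone @__z for CPU
 * @__cpu.  A hypothetical sketch of how a caller looks at the hot list
 * (pcp[0] is hot, pcp[1] is cold; the real logic lives in mm/page_alloc.c):
 *
 *	struct per_cpu_pages *pcp = &zone_pcp(zone, cpu)->pcp[0];
 *	if (pcp->count > pcp->high)
 *		... return pcp->batch pages to the buddy free lists ...
 */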

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able to do
	 * DMA to all of addressable memory (ZONE_NORMAL).  The range covered
	 * is architecture specific (e.g. the low 16MB on i386/x86_64); some
	 * architectures do not need it at all.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M as well as those that can
	 * only DMA to the first 4GB of memory.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address memory beyond the
	 * direct mapping: the kernel sets up special mappings (page table
	 * entries on i386) for each page it needs to access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	MAX_NR_ZONES
};

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone in include/linux/gfp.h
 */

#define __ZONE_COUNT (			\
	  defined(CONFIG_ZONE_DMA)	\
	+ defined(CONFIG_ZONE_DMA32)	\
	+ 1				\
	+ defined(CONFIG_HIGHMEM)	\
	+ 1				\
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
#undef __ZONE_COUNT
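
/*
 * Example of the calculation above: a typical x86_64 build selects
 * CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 but not CONFIG_HIGHMEM, so
 * __ZONE_COUNT is 1 + 1 + 1 (ZONE_NORMAL) + 0 + 1 (ZONE_MOVABLE) = 4 and
 * ZONES_SHIFT is 2.  DMA32 and HIGHMEM are not normally enabled together,
 * which is what keeps __ZONE_COUNT within the 4 that ZONES_SHIFT == 2 can
 * encode.
 */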

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * Access to this field is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table			-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits		-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people waiting
	 * for a page to become available and make them runnable again when
	 * possible.  Per-page waitqueues would consume a lot of space, and
	 * few things wait on pages at any given time, so a waitqueue hash
	 * table is used instead.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes; present_pages is the number of physical pages actually
	 * present in the zone.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

typedef enum {
	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
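
/*
 * Hypothetical sketch of how the reclaim path uses these helpers to
 * serialise per-zone work (the real callers live in mm/vmscan.c and
 * mm/oom_kill.c):
 *
 *	if (!zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) {
 *		... scan this zone ...
 *		zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 *	}
 */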

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0 .. MAX_NR_ZONES - 1]		: Zonelists with fallback
 * [MAX_NR_ZONES .. MAX_ZONELISTS - 1]	: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS (2 * MAX_NR_ZONES)

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The bitmap fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap) we
 *    zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently check whether or not a given zone
 *    in the zonelist is on a node the currently allocating task is
 *    allowed to allocate on.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist's zones[]
 * array, indexed by a zone's position in the zonelist.  The cache is zapped
 * whenever it gets stale (e.g. when the task's mems_allowed changes), so the
 * full-zone information is only a transient optimisation hint, never a
 * correctness issue.
 */
struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS MAX_NR_ZONES
struct zonelist_cache;
#endif

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		/* NULL or &zlcache */
	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];	/* NULL delimited */
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			/* optional */
#endif
};

#ifdef CONFIG_NUMA
/*
 * Only custom zonelists like MPOL_BIND need to be filtered as part of
 * policies. As described in the comment for struct zonelist_cache, these
 * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
 * that to determine if the zonelists need to be filtered or not.
 */
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return !zonelist->zlcache_ptr;
}
#else
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return 0;
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
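
/*
 * zone_watermark_ok() reports whether @z can satisfy an order-@order
 * allocation without its free pages dropping below @mark (normally one of
 * pages_min/pages_low/pages_high), after the lowmem_reserve for
 * @classzone_idx has been taken into account.  Hypothetical caller-side
 * sketch:
 *
 *	if (zone_watermark_ok(zone, order, zone->pages_low, zone_idx(zone), 0))
 *		... this zone is a reasonable candidate for the allocation ...
 */
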
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}
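
/*
 * populated_zone() is the usual filter when walking zones: a node carries a
 * struct zone for every zone type even when no memory falls in it.  A
 * hypothetical sketch:
 *
 *	struct zone *zone;
 *	for_each_zone(zone)
 *		if (populated_zone(zone))
 *			... zone actually has pages, operate on it ...
 */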

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_idx = zone - zone->zone_pgdat->node_zones;
	return zone_idx == ZONE_HIGHMEM ||
		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16

#include <linux/topology.h>

#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(nid)	(0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
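
/*
 * Worked example (assuming x86_64's SECTION_SIZE_BITS of 27 and PAGE_SHIFT
 * of 12): each mem_section spans 2^27 bytes = 128 MiB, PFN_SECTION_SHIFT is
 * 27 - 12 = 15, PAGES_PER_SECTION is 2^15 = 32768 pages, and
 * pfn_to_section_nr(pfn) reduces to pfn >> 15.
 */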

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block.  pfn_valid_within() should
 * be used in this case; we optimise this away when we have no holes within a
 * MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */