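/*
 * Memory management initialization for s390: kernel page table setup
 * (paging_init), boot time memory accounting (mem_init) and memory
 * hotplug support.
 */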
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>

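/* Initial kernel page table used by init_mm; must be page aligned. */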
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

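/*
 * Allocate and reserve a block of zeroed pages to back the kernel's
 * empty zero page. The allocation order is chosen per machine type and
 * reduced on systems with little memory; zero_page_mask is used to
 * select one of the 2^order copies.
 */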
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9 */
	case 0x2096:	/* z9 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	case 0x2828:	/* zEC12 */
		order = 5;
		break;
	case 0x2964:	/* z13 */
	default:
		order = 7;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	/* Split the compound allocation and reserve each page. */
	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and enables
 * dynamic address translation for kernel mode.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long) * 2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	atomic_set(&init_mm.context.attach_count, 1);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	mem_init_print_info(NULL);
	printk(KERN_INFO "Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}

void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater
	 * or equal than the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */