// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */

#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>

#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/sections.h>

extern void die_if_kernel(char *,struct pt_regs *,long);

static struct pcb_struct original_pcb;

pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

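		/* The last PGD entry maps the PGD page itself: the
		   self-map used for virtual page table (VPTB) lookups.  */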
		pgd_val(ret[PTRS_PER_PGD-1])
		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}

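/*
 * __bad_pagetable() and __bad_page() provide the fallback translations
 * handed out on faults when the kernel is out of memory: an empty page
 * table and an empty (dirty, shared) page.  EMPTY_PGT and EMPTY_PGE are
 * fixed pages reserved for this purpose.
 */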
pmd_t *
__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}

static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	/* $30 is the stack pointer on Alpha; record the current kernel
	   stack in the PCB before reloading the thread context.  */
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}

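/* Switch the CPU from the boot/console page tables onto the kernel's
   own system map (swapper_pg_dir).  */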
static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

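	/* Initialize the kernel's page tables.  The virtual page table
	   base is installed in the last slot of the L1 page table.  */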
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

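	/* Point the hardware virtual page table base at the address
	   Linux uses, in case the bootloader has not already done so.  */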
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

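	/* Also set up the real kernel PCB while we're at it: the new
	   page table base, with only the low (FEN) bit set in flags.  */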
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();

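	/* Save the console's original PCB so its page tables can be
	   restored for a clean return to the console (srm_paging_stop).
	   Depending on the firmware, the pointer returned by load_PCB
	   may be physical or virtual, so normalize it to a virtual
	   address before dereferencing it.  */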
	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;

void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	void *two_pages;

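	/* The console routine block (CRB) is found at the offset
	   recorded in the HWRPB.  */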
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
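		/* Tell the SRM console where it is about to be remapped;
		   if the fixup call fails there is nothing left to do
		   but halt.  */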
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();

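		/* Relocate the DISPATCH and FIXUP procedure descriptors
		   into the console's new virtual mapping.  */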
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

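	/* Allocate two zeroed pages just past the kernel image: the
	   first becomes the PMD page for the vmalloc area, the second
	   its first PTE page.  Additional PTE pages are taken from
	   kernel_end below if the console mapping needs them.  */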
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	p4d = p4d_offset(pgd, VMALLOC_START);
	pud = pud_offset(p4d, VMALLOC_START);
	pud_set(pud, (pmd_t *)two_pages);
	pmd = pmd_offset(pud, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

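		/* Count how many pages the console's mappings cover.  */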
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

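		/* Reserve a matching range of vmalloc space early,
		   before the vmalloc allocator itself is up.  */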
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

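		/* Fill in the third-level PTEs and rewrite the CRB map
		   entries to point at their new virtual addresses.  */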
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
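				/* Larger consoles may need more PTE pages
				   than the single one allocated above; take
				   another page from kernel_end whenever the
				   mapping crosses into a new PMD.  */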
				if (pmd != pmd_offset(pud, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pud, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}

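/*
 * paging_init() sets up the memory map: the zone limits are derived
 * from the DMA boundary (MAX_DMA_ADDRESS) and max_low_pfn, and the
 * kernel's ZERO_PGE is cleared for use as the zero page.
 */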
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
	unsigned long dma_pfn;

	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	max_zone_pfn[ZONE_DMA] = dma_pfn;
	max_zone_pfn[ZONE_NORMAL] = max_pfn;

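	/* Initialize the zones and mem_map[].  */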
	free_area_init(max_zone_pfn);

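	/* Clear the kernel's ZERO_PGE, used as the shared zero page.  */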
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop (void)
{
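	/* Move the virtual page table base back to where the SRM
	   console expects it, and update the HWRPB to match.  */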
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

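	/* Reload the page tables the console was using before boot.  */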
	load_PCB(&original_pcb);
	tbia();
}
#endif

void __init
mem_init(void)
{
	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
}