1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/smp.h>
35#include <linux/interrupt.h>
36#include <asm/system.h>
37#include <asm/tlb.h>
38#include <asm/io.h>
39#include <asm/uaccess.h>
40#include <asm/pgalloc.h>
41#include <asm/mmu_context.h>
42#include <cpu/registers.h>
43
44
/*
 * Refill one TLB slot for a faulting address.
 *
 * Builds the PTEH/PTEL hardware register pair from the Linux PTE and
 * loads it into the next slot of the ITLB (is_text_not_data != 0) or
 * DTLB, then advances that TLB's round-robin replacement pointer.
 */
inline void __do_tlb_refill(unsigned long address,
 unsigned long long is_text_not_data, pte_t *pte)
{
 unsigned long long ptel;
 unsigned long long pteh=0;
 struct tlb_info *tlbp;
 unsigned long long next;

 /* Snapshot the PTE value; the hardware-relevant bits are masked below. */
 ptel = pte_val(*pte);

 /*
  * Build PTEH: sign-extended VPN, current ASID, and the valid bit.
  * neff_sign_extend() folds the address into the implemented
  * effective-address width.
  */
 pteh = neff_sign_extend(address & MMU_VPN_MASK);

 /* Tag the entry with the current address-space ID. */
 pteh |= get_asid() << PTEH_ASID_SHIFT;
 pteh |= PTEH_VALID;

 /* Keep only the bits the hardware understands in PTEL. */
 ptel &= _PAGE_FLAGS_HARDWARE_MASK;

 tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
 next = tlbp->next;
 __flush_tlb_slot(next);
 /*
  * Program the slot: PTEL (config index 1) is written before PTEH
  * (config index 0, which carries PTEH_VALID), so the entry only
  * becomes valid once it is complete.
  */
 asm volatile ("putcfg %0,1,%2\n\n\t"
 "putcfg %0,0,%1\n"
 : : "r" (next), "r" (pteh), "r" (ptel) );

 /* Round-robin replacement: wrap back to 'first' past 'last'. */
 next += TLB_STEP;
 if (next > tlbp->last) next = tlbp->first;
 tlbp->next = next;

}
80
81static int handle_vmalloc_fault(struct mm_struct *mm,
82 unsigned long protection_flags,
83 unsigned long long textaccess,
84 unsigned long address)
85{
86 pgd_t *dir;
87 pud_t *pud;
88 pmd_t *pmd;
89 static pte_t *pte;
90 pte_t entry;
91
92 dir = pgd_offset_k(address);
93
94 pud = pud_offset(dir, address);
95 if (pud_none_or_clear_bad(pud))
96 return 0;
97
98 pmd = pmd_offset(pud, address);
99 if (pmd_none_or_clear_bad(pmd))
100 return 0;
101
102 pte = pte_offset_kernel(pmd, address);
103 entry = *pte;
104
105 if (pte_none(entry) || !pte_present(entry))
106 return 0;
107 if ((pte_val(entry) & protection_flags) != protection_flags)
108 return 0;
109
110 __do_tlb_refill(address, textaccess, pte);
111
112 return 1;
113}
114
115static int handle_tlbmiss(struct mm_struct *mm,
116 unsigned long long protection_flags,
117 unsigned long long textaccess,
118 unsigned long address)
119{
120 pgd_t *dir;
121 pud_t *pud;
122 pmd_t *pmd;
123 pte_t *pte;
124 pte_t entry;
125
126
127
128
129
130
131
132
133
134
135
136 if (address >= (unsigned long) TASK_SIZE)
137
138 return 0;
139
140 dir = pgd_offset(mm, address);
141 if (pgd_none(*dir) || !pgd_present(*dir))
142 return 0;
143 if (!pgd_present(*dir))
144 return 0;
145
146 pud = pud_offset(dir, address);
147 if (pud_none(*pud) || !pud_present(*pud))
148 return 0;
149
150 pmd = pmd_offset(pud, address);
151 if (pmd_none(*pmd) || !pmd_present(*pmd))
152 return 0;
153
154 pte = pte_offset_kernel(pmd, address);
155 entry = *pte;
156
157 if (pte_none(entry) || !pte_present(entry))
158 return 0;
159
160
161
162
163
164
165
166 if ((pte_val(entry) & protection_flags) != protection_flags)
167 return 0;
168
169 __do_tlb_refill(address, textaccess, pte);
170
171 return 1;
172}
173
174
175
176
177
178
/*
 * Decode table indexed by the 3-bit hash of EXPEVT computed in
 * do_fast_page_fault(): per index, the PTE protection bits required
 * and whether the access was an instruction fetch.
 */
struct expevt_lookup {
 unsigned short protection_flags[8];
 unsigned char is_text_access[8];
 /* NOTE(review): never initialized or read in this file — possibly vestigial. */
 unsigned char is_write_access[8];
};
184
/*
 * PTE protection bits tested against protection_flags below.
 * Names suggest user/write/execute/read permission bits — the table
 * pairs PRX with instruction fetches, PRR/PRW with data accesses.
 */
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/*
 * Indexed by ((expevt >> 4) ^ (expevt >> 9)) & 7 — see
 * do_fast_page_fault().  Indices 0-1: instruction fetch (needs PRX);
 * 4-5: read (PRR); 6-7: write (PRW).  is_write_access stays
 * zero-initialized.
 */
static struct expevt_lookup expevt_lookup_table = {
 .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
 .is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
};
199
200
201
202
203
204
205
206
207
208asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
209 unsigned long long expevt,
210 unsigned long address)
211{
212 struct task_struct *tsk;
213 struct mm_struct *mm;
214 unsigned long long textaccess;
215 unsigned long long protection_flags;
216 unsigned long long index;
217 unsigned long long expevt4;
218
219
220
221
222
223
224
225
226
227
228 expevt4 = (expevt >> 4);
229
230
231 index = expevt4 ^ (expevt4 >> 5);
232 index &= 7;
233 protection_flags = expevt_lookup_table.protection_flags[index];
234 textaccess = expevt_lookup_table.is_text_access[index];
235
236
237
238
239
240
241
242
243
244
245
246
247 tsk = current;
248 mm = tsk->mm;
249
250 if ((address >= VMALLOC_START && address < VMALLOC_END) ||
251 (address >= IOBASE_VADDR && address < IOBASE_END)) {
252 if (ssr_md)
253
254
255
256
257 if (handle_vmalloc_fault(mm, protection_flags,
258 textaccess, address))
259 return 1;
260 } else if (!in_interrupt() && mm) {
261 if (handle_tlbmiss(mm, protection_flags, textaccess, address))
262 return 1;
263 }
264
265 return 0;
266}
267