1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/smp.h>
35#include <linux/interrupt.h>
36#include <linux/kprobes.h>
37#include <asm/tlb.h>
38#include <asm/io.h>
39#include <asm/uaccess.h>
40#include <asm/pgalloc.h>
41#include <asm/mmu_context.h>
42
/*
 * Fast-path software TLB refill: walk the page tables for @address and,
 * if a present PTE carrying all of @protection_flags exists, push it into
 * the TLB via update_mmu_cache().
 *
 * @protection_flags: PTE permission bits that must ALL be set for this
 *                    access to be allowed (combinations of PRR/PRW/PRX,
 *                    see expevt_lookup_table).
 * @address:          faulting virtual address.
 *
 * Returns 0 if the miss was handled here, 1 if the caller must fall
 * through to the full (slow-path) page fault handler.
 */
static int handle_tlbmiss(unsigned long long protection_flags,
			  unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* vmalloc addresses live in the kernel page tables; everything
	 * else is looked up in the current task's mm. */
	if (is_vmalloc_addr((void *)address)) {
		pgd = pgd_offset_k(address);
	} else {
		/* Out-of-range user address, or no mm to walk (e.g. a
		 * kernel thread): punt to the slow path. */
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	/* Walk down the levels; any non-present entry means the fast
	 * path cannot resolve the miss. */
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 1;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 1;

	pte = pte_offset_kernel(pmd, address);
	/* Take one snapshot of the PTE and test that copy, so the
	 * checks below see a consistent value. */
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return 1;

	/* The access is only allowed if every required permission bit
	 * is present in the PTE; otherwise this is a protection fault
	 * for the slow path to sort out. */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 1;

	/* NOTE(review): vma argument is NULL — presumably this arch's
	 * update_mmu_cache() ignores it; confirm against the
	 * implementation. */
	update_mmu_cache(NULL, address, pte);

	return 0;
}
87
88
89
90
91
92
/*
 * Per-exception lookup data, indexed by the 3-bit hash of the EXPEVT
 * code computed in do_fast_page_fault(): for each of the 8 indices,
 * the PTE permission bits the access requires and what kind of access
 * it was.
 */
struct expevt_lookup {
	unsigned short protection_flags[8];	/* required PTE bits (PRR/PRW/PRX) */
	unsigned char is_text_access[8];	/* 1 = instruction fetch */
	unsigned char is_write_access[8];	/* NOTE(review): never read in this file */
};
98
/*
 * PTE protection bits tested by the fast TLB-miss path.
 * PRX/PRR/PRW usage is established by expevt_lookup_table below
 * (text / read / write accesses respectively); PRU is unused here —
 * presumably the user-access bit, confirm against the PTE layout.
 */
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)
103
104
105
/*
 * Indexed by the hashed EXPEVT value from do_fast_page_fault():
 * indices 0-1 are instruction fetches (need PRX), 4-5 reads (PRR),
 * 6-7 writes (PRW); 2-3 are unused (no required bits).
 * .is_write_access is left zero-initialized and is not consulted
 * anywhere in this file.
 */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
};
110
111static inline unsigned int
112expevt_to_fault_code(unsigned long expevt)
113{
114 if (expevt == 0xa40)
115 return FAULT_CODE_ITLB;
116 else if (expevt == 0x060)
117 return FAULT_CODE_WRITE;
118
119 return 0;
120}
121
122
123
124
125
126
127
128
129
130asmlinkage int __kprobes
131do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
132 unsigned long address)
133{
134 unsigned long long protection_flags;
135 unsigned long long index;
136 unsigned long long expevt4;
137 unsigned int fault_code;
138
139
140
141
142
143
144
145
146
147
148 expevt4 = (expevt >> 4);
149
150
151 index = expevt4 ^ (expevt4 >> 5);
152 index &= 7;
153
154 fault_code = expevt_to_fault_code(expevt);
155
156 protection_flags = expevt_lookup_table.protection_flags[index];
157
158 if (expevt_lookup_table.is_text_access[index])
159 fault_code |= FAULT_CODE_ITLB;
160 if (!ssr_md)
161 fault_code |= FAULT_CODE_USER;
162
163 set_thread_fault_code(fault_code);
164
165 return handle_tlbmiss(protection_flags, address);
166}
167