/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

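/*
 * Attribute masks handed to change_page_range(): bits in clear_mask are
 * removed from each PTE before bits in set_mask are applied.
 */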
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

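/*
 * Defaults to CONFIG_RODATA_FULL_DEFAULT_ENABLED and can be overridden
 * with rodata=full on the kernel command line. When true, permission
 * changes on vmalloc memory are mirrored into the linear map alias.
 */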
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

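/*
 * Callback for apply_to_page_range(): updates a single kernel PTE,
 * first clearing and then setting the requested attribute bits.
 */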
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				  pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

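/*
 * Page-align the request, check that it lies entirely within a single
 * vmalloc/vmap area, and apply the attribute change. Read-only changes
 * are also applied to the linear map alias when rodata_full is set.
 */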
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * overlap with the modified range.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

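/*
 * set_memory_ro/rw toggle PTE_RDONLY and PTE_WRITE against each other;
 * set_memory_nx/x toggle PTE_PXN. A minimal usage sketch (not part of
 * this file; assumes "buf" is a vmalloc() allocation of nr_pages pages):
 *
 *	set_memory_ro((unsigned long)buf, nr_pages);
 *	... buf is now write-protected ...
 *	set_memory_rw((unsigned long)buf, nr_pages);
 */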
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

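/*
 * Unlike the helpers above, set_memory_valid() bypasses the vmalloc
 * area check: it operates on linear map addresses, toggling PTE_VALID
 * to map (enable != 0) or unmap (enable == 0) the pages.
 */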
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

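/*
 * Remove a page from the kernel linear map. No TLB maintenance is done
 * here (hence "_noflush"); a caller that needs the stale translation
 * gone must flush, e.g. (sketch, addr being the page's linear address):
 *
 *	set_direct_map_invalid_noflush(page);
 *	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 */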
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!debug_pagealloc_enabled() && !rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

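/*
 * Restore a linear map page to its default valid and writable state,
 * again leaving any TLB maintenance to the caller.
 */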
int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!debug_pagealloc_enabled() && !rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
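/*
 * With CONFIG_DEBUG_PAGEALLOC, pages are unmapped from the linear map
 * when freed (enable == 0) and mapped back on allocation (enable != 0),
 * so stale accesses to freed memory fault immediately.
 */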
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled() && !rodata_full)
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * Determine whether a kernel linear map page is still present, i.e. has
 * PTE_VALID set. When neither debug_pagealloc nor rodata_full is in
 * effect, nothing ever unmaps linear map pages, so the page is reported
 * present without a walk. Section mappings at the PUD or PMD level are
 * always present, so the walk stops there.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled() && !rodata_full)
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}