// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

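/*
 * rodata_full is controlled by the rodata= boot parameter ("rodata=full");
 * the Kconfig option referenced below supplies the default. When true,
 * permission changes made through the set_memory_*() helpers are also
 * applied to the linear map aliases of the affected pages.
 */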
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}
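
/*
 * Illustrative example of the mask mechanics above: with
 * set_mask = __pgprot(PTE_RDONLY) and clear_mask = __pgprot(PTE_WRITE),
 * change_page_range() turns a writable PTE
 *
 *	... | PTE_WRITE | PTE_VALID
 *
 * into a read-only one
 *
 *	... | PTE_RDONLY | PTE_VALID
 *
 * in a single set_pte(); TLB maintenance is left to the caller.
 */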

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
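
/*
 * Sketch of how the helpers below drive __change_memory_common() to remap a
 * page-aligned range read-only and then writable again (assuming the range
 * is mapped with PAGE_SIZE pages, per the comment above):
 *
 *	__change_memory_common(addr, size,
 *			       __pgprot(PTE_RDONLY), __pgprot(PTE_WRITE));
 *	__change_memory_common(addr, size,
 *			       __pgprot(PTE_WRITE), __pgprot(PTE_RDONLY));
 *
 * Note that the TLB is flushed for the whole range even if
 * apply_to_page_range() fails partway through.
 */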

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
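
/*
 * Hypothetical usage sketch for the set_memory_*() helpers below, assuming
 * 'buf' points to 'nr' pages obtained from vmalloc() (any address outside a
 * VM_ALLOC area is rejected with -EINVAL above):
 *
 *	buf = vmalloc(nr * PAGE_SIZE);
 *	... initialise buf ...
 *	set_memory_ro((unsigned long)buf, nr);
 *	... buf is now write-protected ...
 *	set_memory_rw((unsigned long)buf, nr);
 *	vfree(buf);
 */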

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}
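
/*
 * Hedged example: making vmalloc'd code pages executable, roughly as a
 * module loader would, assuming 'code' points to 'nr' vmalloc'd pages.
 * Clearing PTE_PXN drops the privileged execute-never attribute, and
 * PTE_MAYBE_GP additionally marks the pages as BTI guarded when the kernel
 * is built with ARM64_BTI_KERNEL support:
 *
 *	set_memory_ro((unsigned long)code, nr);
 *	set_memory_x((unsigned long)code, nr);
 */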

int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
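
/*
 * Illustrative pairing (see __kernel_map_pages() below): with
 * debug_pagealloc, pages are unmapped from the linear map on free and
 * remapped on allocation by toggling PTE_VALID:
 *
 *	set_memory_valid(addr, numpages, 0);
 *	set_memory_valid(addr, numpages, 1);
 *
 * The caller must guarantee the range is mapped with page mappings.
 */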

int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!rodata_full)
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}
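
/*
 * Hedged sketch of an invalidate/restore cycle on a single linear map page;
 * the *_noflush() helpers above leave TLB maintenance to the caller:
 *
 *	set_direct_map_invalid_noflush(page);
 *	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 *	... the page is now inaccessible via the linear map ...
 *	set_direct_map_default_noflush(page);
 *	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 */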

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled() && !rodata_full)
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled() && !rodata_full)
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
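
/*
 * Illustrative caller (modelled on hibernation's safe_copy_page()): check
 * whether a page's linear map entry is live before touching its contents,
 * and map it temporarily if not:
 *
 *	if (!kernel_page_present(page))
 *		__kernel_map_pages(page, 1, 1);
 */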