// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

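/*
 * The opaque data argument handed to apply_to_page_range(): the pte bits
 * to set and the pte bits to clear for every page in the walked range.
 */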
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

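/*
 * Callback invoked by apply_to_page_range() for each pte in the range:
 * rewrite the pte with cdata->clear_mask cleared and cdata->set_mask set.
 */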
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			     void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				  pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

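/*
 * Common entry point for the set_memory_*() helpers below: page-align and
 * validate the [addr, addr + numpages * PAGE_SIZE) range, then update the
 * protection bits of its vmalloc mapping (and, with rodata_full, of the
 * linear alias of each backing page).
 */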
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or
	 * vmap). Those are guaranteed to consist of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

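/*
 * set_memory_ro()/set_memory_rw() toggle write permission on a page-aligned
 * vmalloc range. A minimal illustrative caller (hypothetical, not part of
 * this file) might look like:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 1);	// buf is now read-only
 *	set_memory_rw((unsigned long)buf, 1);	// writable again
 *	vfree(buf);
 */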
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

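/*
 * set_memory_nx()/set_memory_x() toggle PTE_PXN (Privileged eXecute Never)
 * on the range, making it non-executable or executable at EL1.
 */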
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

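/*
 * Toggle the PTE_VALID bit on an already page-mapped range, mapping or
 * unmapping it without touching any other attributes. Unlike the helpers
 * above, this bypasses the vmalloc-area check, so it also works on the
 * linear map (see __kernel_map_pages() below).
 */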
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

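/*
 * CONFIG_DEBUG_PAGEALLOC hook: unmap (or remap) pages in the linear map as
 * they are freed (or allocated), so that stray accesses to freed memory
 * fault immediately.
 */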
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#ifdef CONFIG_HIBERNATION
/*
 * Used by hibernation when CONFIG_DEBUG_PAGEALLOC is enabled: pages that
 * __kernel_map_pages() has marked invalid in the linear map must not be
 * read while the hibernation image is written. Walk the kernel page tables
 * by hand to find out whether the given page is currently mapped.
 *
 * Section mappings at the pud/pmd level can be reported as present without
 * inspecting a pte, since they are never split here and are always valid
 * while they exist.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */