1
2
3
4
5
6
7
8
9
10
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/vmstat.h>
14#include <linux/highmem.h>
15#include <asm/pgtable.h>
16
/*
 * Read a pte without holding any page-table lock.
 *
 * In the non-extended-TLB configuration the pte fits in one word, so a
 * single READ_ONCE() gives us a consistent snapshot.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
	return READ_ONCE(*ptep);
#else
	/*
	 * With CONFIG_X2TLB the pte is split into pte_low/pte_high and
	 * cannot be loaded atomically, while an updater (running on
	 * another CPU) may change the two halves independently.
	 *
	 * So we read pte_low, then pte_high, with an smp_rmb() between
	 * and after the loads, and finally re-check pte_low: if it
	 * changed while we were reading pte_high, the two halves may
	 * belong to different ptes and we must retry.  (Same scheme as
	 * the x86 PAE gup_get_pte.)
	 *
	 * NOTE(review): this only detects tearing when the updater
	 * modifies pte_low; callers are expected to tolerate a stale —
	 * but internally consistent — pte value.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}
68
69
70
71
72
73
/*
 * Walk the pte level covering [addr, end) beneath @pmd and take a
 * reference on every backing page, storing the pages at pages[*nr]
 * onward and advancing *nr.
 *
 * Returns 1 if the whole range was pinned, 0 if any pte fails the
 * present/permission check (the caller then falls back to the slow
 * path).  Runs locklessly (IRQs disabled by the caller), so each pte
 * is sampled exactly once via gup_get_pte().
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	u64 mask, result;
	pte_t *ptep;

	/*
	 * Build the set of flags a pte must have for this access,
	 * per MMU flavor: present + user-readable, plus the write
	 * permission bits when @write is set.
	 */
#ifdef CONFIG_X2TLB
	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
	if (write)
		result |= _PAGE_WRITE;
#else
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;
#endif

	/*
	 * Also test _PAGE_SPECIAL: a special pte has it set and so can
	 * never satisfy (pte & mask) == result, forcing the slow path.
	 */
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		/* Any missing flag (or a special pte) aborts the fast walk. */
		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		/*
		 * SH caches are virtually indexed: make the kernel's view
		 * of the page coherent before handing it to the caller.
		 */
		__flush_anon_page(page, addr);
		flush_dcache_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}
118
119static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
120 int write, struct page **pages, int *nr)
121{
122 unsigned long next;
123 pmd_t *pmdp;
124
125 pmdp = pmd_offset(&pud, addr);
126 do {
127 pmd_t pmd = *pmdp;
128
129 next = pmd_addr_end(addr, end);
130 if (pmd_none(pmd))
131 return 0;
132 if (!gup_pte_range(pmd, addr, next, write, pages, nr))
133 return 0;
134 } while (pmdp++, addr = next, addr != end);
135
136 return 1;
137}
138
139static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
140 int write, struct page **pages, int *nr)
141{
142 unsigned long next;
143 pud_t *pudp;
144
145 pudp = pud_offset(&pgd, addr);
146 do {
147 pud_t pud = *pudp;
148
149 next = pud_addr_end(addr, end);
150 if (pud_none(pud))
151 return 0;
152 if (!gup_pmd_range(pud, addr, next, write, pages, nr))
153 return 0;
154 } while (pudp++, addr = next, addr != end);
155
156 return 1;
157}
158
159
160
161
162
163int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
164 struct page **pages)
165{
166 struct mm_struct *mm = current->mm;
167 unsigned long addr, len, end;
168 unsigned long next;
169 unsigned long flags;
170 pgd_t *pgdp;
171 int nr = 0;
172
173 start &= PAGE_MASK;
174 addr = start;
175 len = (unsigned long) nr_pages << PAGE_SHIFT;
176 end = start + len;
177 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
178 (void __user *)start, len)))
179 return 0;
180
181
182
183
184
185 local_irq_save(flags);
186 pgdp = pgd_offset(mm, addr);
187 do {
188 pgd_t pgd = *pgdp;
189
190 next = pgd_addr_end(addr, end);
191 if (pgd_none(pgd))
192 break;
193 if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
194 break;
195 } while (pgdp++, addr = next, addr != end);
196 local_irq_restore(flags);
197
198 return nr;
199}
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/*
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned;
 *		should be long enough for nr_pages pointers
 *
 * Attempts the lockless fast walk first; on any failure it falls back
 * to get_user_pages_unlocked() for the not-yet-pinned remainder.
 * Returns the number of pages pinned, which may be fewer than
 * requested (or a negative errno if nothing was pinned at all).
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	/* Range wraps the address space: nothing fast-walkable here. */
	if (end < start)
		goto slow_irqon;

	/* IRQs off: page tables cannot be freed while we walk them. */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Retry only the pages the fast path did not pin. */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages);

		/*
		 * If the fast path pinned some pages, report those even
		 * when the slow path failed; otherwise add the slow-path
		 * count (or propagate its error when nr == 0).
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
274