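/*
 * User address space access functions: the non-inlined parts of the
 * uaccess helpers.  The register names ($r10..$r13, $sp), the movem
 * instruction and the branch delay-slots below are those of the CRIS
 * architecture; the fast paths move one 44-byte "movem line"
 * (registers r0..r10) per iteration.
 */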
#include <asm/uaccess.h>
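/* Copy a block from kernel memory to user space.  Returns the number of
   bytes that could not be copied, i.e. 0 on complete success.  */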
unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
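  /* The inline asm below demands the parameters in these specific
     registers (it verifies this with .ifnc and errors out otherwise),
     so pin them with register asm declarations.  */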
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
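  /* Align dst to a 4-byte boundary so the movem stores below are
     aligned; don't bother when there are only a few bytes to copy.  */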
  if (((unsigned long) dst & 3) != 0
      && n >= 3)
    {
      if ((unsigned long) dst & 1)
	{
	  __asm_copy_to_user_1 (dst, src, retn);
	  n--;
	}

      if ((unsigned long) dst & 2)
	{
	  __asm_copy_to_user_2 (dst, src, retn);
	  n -= 2;
	}
    }
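  /* Use the movem fast path only for blocks of at least two 44-byte
     movem lines; below that, the cost of saving and restoring the
     registers movem uses outweighs the gain.  */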
  if (n >= 44*2)
    {
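      /* The movem loop lives in inline asm so the registers it
	 clobbers can be saved and restored locally; declaring them as
	 clobbers to gcc would move the saving into the function
	 prologue/epilogue and penalize the non-movem sizes.  */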
      __asm__ volatile ("\
	.ifnc %0%1%2%3,$r13$r11$r12$r10				\n\
	.err							\n\
	.endif							\n\
								\n\
	;; Save the registers we'll use in the movem process	\n\
	;; on the stack.					\n\
	subq	11*4,$sp					\n\
	movem	$r10,[$sp]					\n\
								\n\
	;; Now we've got this:					\n\
	;; r11 - src						\n\
	;; r13 - dst						\n\
	;; r12 - n						\n\
								\n\
	;; Update n for the first loop				\n\
	subq	44,$r12						\n\
								\n\
; Since the PC noted for a faulting instruction in the delay-slot of a	\n\
; taken branch is that of the branch target, we actually point at the	\n\
; from-movem for this case.  There is no ambiguity here; if there was	\n\
; a fault in that instruction (meaning a kernel oops), the faulted PC	\n\
; would be the address after *that* movem.			\n\
								\n\
0:								\n\
	movem	[$r11+],$r10					\n\
	subq	44,$r12						\n\
	bge	0b						\n\
	movem	$r10,[$r13+]					\n\
1:								\n\
	addq	44,$r12	;; compensate for last loop underflowing n \n\
								\n\
	;; Restore registers from stack				\n\
	movem	[$sp+],$r10					\n\
2:								\n\
	.section .fixup,\"ax\"					\n\
								\n\
; To provide a correct count in r10 of bytes that failed to be copied,	\n\
; we jump back into the loop if the loop-branch was taken.  There is no	\n\
; performance penalty for sane use; the program will segfault soon	\n\
; enough.							\n\
								\n\
3:								\n\
	move.d	[$sp],$r10					\n\
	addq	44,$r10						\n\
	move.d	$r10,[$sp]					\n\
	jump	0b						\n\
4:								\n\
	movem	[$sp+],$r10					\n\
	addq	44,$r10						\n\
	addq	44,$r12						\n\
	jump	2b						\n\
								\n\
	.previous						\n\
	.section __ex_table,\"a\"				\n\
	.dword 0b,3b						\n\
	.dword 1b,4b						\n\
	.previous"

	: "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
	: "0" (dst), "1" (src), "2" (n), "3" (retn));
    }
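  /* Copy whatever remains: 16 bytes at a time, then a dword at a time,
     then the 1..3 byte tail.  Each helper advances dst and src and
     accumulates the count of uncopied bytes in retn.  */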
  while (n >= 16)
    {
      __asm_copy_to_user_16 (dst, src, retn);
      n -= 16;
    }

  while (n >= 4)
    {
      __asm_copy_to_user_4 (dst, src, retn);
      n -= 4;
    }

  switch (n)
    {
    case 0:
      break;
    case 1:
      __asm_copy_to_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_to_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_to_user_3 (dst, src, retn);
      break;
    }

  return retn;
}
EXPORT_SYMBOL(__copy_user);
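/* Typical use is via the generic uaccess wrappers, roughly like the
   sketch below (ubuf, kbuf and len are illustrative; the real wrappers
   live in the architecture's <asm/uaccess.h> and check the range with
   access_ok() before ending up here):

     if (copy_to_user(ubuf, kbuf, len) != 0)
	     return -EFAULT;
*/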
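/* Copy a block from user space to kernel memory.  If a fault is hit,
   the not-copied remainder of the destination is zero-filled so the
   caller never sees stale kernel data.  Returns the number of bytes
   that could not be copied.  */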
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
				  unsigned long pn)
{
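  /* As in __copy_user, the asm fragment below requires the parameters
     in exactly these registers.  */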
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
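  /* This time align src, since the movem loads must be aligned.  Note
     the n checks on each step: reads from user space may fault, and we
     must never touch more than the requested n bytes.  */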
  if (((unsigned long) src & 3) != 0)
    {
      if (((unsigned long) src & 1) && n != 0)
	{
	  __asm_copy_from_user_1 (dst, src, retn);
	  n--;
	}

      if (((unsigned long) src & 2) && n >= 2)
	{
	  __asm_copy_from_user_2 (dst, src, retn);
	  n -= 2;
	}
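      /* One check after both adjustment steps is enough: if both were
	 performed, the accesses lie within the same dword and hence
	 the same page, so either both faulted or neither did.  */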
      if (retn != 0)
	goto copy_exception_bytes;
    }

  if (n >= 44*2)
    {
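      /* As above, the movem loop saves and restores its registers
	 within the asm itself.  Unlike the to-user case, the fixup
	 code must not resume the loop: the fault happened on a load
	 from user space, so we fall through and retry the remainder
	 in smaller steps below.  */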
      __asm__ volatile ("\n\
	.ifnc %0%1%2%3,$r13$r11$r12$r10				\n\
	.err							\n\
	.endif							\n\
								\n\
	;; Save the registers we'll use in the movem process	\n\
	;; on the stack.					\n\
	subq	11*4,$sp					\n\
	movem	$r10,[$sp]					\n\
								\n\
	;; Now we've got this:					\n\
	;; r11 - src						\n\
	;; r13 - dst						\n\
	;; r12 - n						\n\
								\n\
	;; Update n for the first loop				\n\
	subq	44,$r12						\n\
0:								\n\
	movem	[$r11+],$r10					\n\
1:								\n\
	subq	44,$r12						\n\
	bge	0b						\n\
	movem	$r10,[$r13+]					\n\
								\n\
	addq	44,$r12	;; compensate for last loop underflowing n \n\
								\n\
	;; Restore registers from stack				\n\
	movem	[$sp+],$r10					\n\
4:								\n\
	.section .fixup,\"ax\"					\n\
								\n\
;; Do not jump back into the loop if we fail.  For some uses, we get a	\n\
;; page fault somewhere on the line.  Without checking for page limits,\n\
;; we don't know where, but we need to copy accurately and keep an	\n\
;; accurate count, not just clear the whole line.  To do that, we fall	\n\
;; through to the code below, proceeding with smaller amounts.  It	\n\
;; should be kept in mind that we have to cater to code like what at	\n\
;; one time was in fs/super.c:					\n\
;;  i = size - copy_from_user((void *)page, data, size);	\n\
;; which would cause repeated faults while clearing the remainder of	\n\
;; the SIZE bytes at PAGE after the first fault.		\n\
;; A caveat here is that we must not fall through from a failing page	\n\
;; to a valid page.						\n\
								\n\
3:								\n\
	movem	[$sp+],$r10					\n\
	addq	44,$r12	;; Get back count before faulting point.	\n\
	subq	44,$r11	;; Get back pointer to faulting movem-line.	\n\
	jump	4b	;; Fall through, pretending the fault didn't happen.\n\
								\n\
	.previous						\n\
	.section __ex_table,\"a\"				\n\
	.dword 1b,3b						\n\
	.previous"

	: "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
	: "0" (dst), "1" (src), "2" (n), "3" (retn));
    }
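  /* Either we start copying here directly, or the movem path above has
     already advanced src, dst and n and only the sub-44-byte tail is
     left.  Only dword and smaller steps are used from here on, and
     retn is checked after every step so a fault exits with an exact
     count of the bytes that failed.  */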
  while (n >= 4)
    {
      __asm_copy_from_user_4 (dst, src, retn);
      n -= 4;

      if (retn)
	goto copy_exception_bytes;
    }
  switch (n)
    {
    case 0:
      break;
    case 1:
      __asm_copy_from_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_from_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_from_user_3 (dst, src, retn);
      break;
    }

  return retn;
copy_exception_bytes:
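  /* retn bytes failed inside one of the helpers above, and the n bytes
     from dst onwards were never written.  Zero them byte by byte; this
     is not speed-critical, and the inline loop keeps this a leaf
     function instead of calling memset.  */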
  {
    char *endp;
    for (endp = dst + n; dst < endp; dst++)
      *dst = 0;
  }

  return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
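/* Zero a block of memory in user space.  Returns the number of bytes
   that could not be cleared.  */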
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
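  /* Again the asm below insists on its operands being in these exact
     registers.  */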
  register char *dst __asm__ ("r13") = pto;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
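  /* Align dst as in __copy_user.  */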
  if (((unsigned long) dst & 3) != 0
      && n >= 3)
    {
      if ((unsigned long) dst & 1)
	{
	  __asm_clear_1 (dst, retn);
	  n--;
	}

      if ((unsigned long) dst & 2)
	{
	  __asm_clear_2 (dst, retn);
	  n -= 2;
	}
    }
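  /* The clearing loop below zeroes r0..r11 and stores all twelve
     registers per movem, so a full line here is 48 bytes rather than
     the 44 of the copy loops.  */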
  if (n >= (1*48))
    {
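      /* As in the copy routines, the registers used by movem are saved
	 and restored inside the asm so gcc doesn't spill them in the
	 prologue.  r11 appears in the clobber list because it is zeroed
	 but, unlike r0..r10, not covered by the movem save of $r10.  */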
      __asm__ volatile ("\n\
	.ifnc %0%1%2,$r13$r12$r10				\n\
	.err							\n\
	.endif							\n\
								\n\
	;; Save the registers we'll clobber in the movem process \n\
	;; on the stack.  Don't mention them to gcc, it will only be \n\
	;; upset.						\n\
	subq	11*4,$sp					\n\
	movem	$r10,[$sp]					\n\
								\n\
	clear.d	$r0						\n\
	clear.d	$r1						\n\
	clear.d	$r2						\n\
	clear.d	$r3						\n\
	clear.d	$r4						\n\
	clear.d	$r5						\n\
	clear.d	$r6						\n\
	clear.d	$r7						\n\
	clear.d	$r8						\n\
	clear.d	$r9						\n\
	clear.d	$r10						\n\
	clear.d	$r11						\n\
								\n\
	;; Now we've got this:					\n\
	;; r13 - dst						\n\
	;; r12 - n						\n\
								\n\
	;; Update n for the first loop				\n\
	subq	12*4,$r12					\n\
0:								\n\
	subq	12*4,$r12					\n\
	bge	0b						\n\
	movem	$r11,[$r13+]					\n\
1:								\n\
	addq	12*4,$r12 ;; compensate for last loop underflowing n \n\
								\n\
	;; Restore registers from stack				\n\
	movem	[$sp+],$r10					\n\
2:								\n\
	.section .fixup,\"ax\"					\n\
3:								\n\
	move.d	[$sp],$r10					\n\
	addq	12*4,$r10					\n\
	move.d	$r10,[$sp]					\n\
	clear.d	$r10						\n\
	jump	0b						\n\
								\n\
4:								\n\
	movem	[$sp+],$r10					\n\
	addq	12*4,$r10					\n\
	addq	12*4,$r12					\n\
	jump	2b						\n\
								\n\
	.previous						\n\
	.section __ex_table,\"a\"				\n\
	.dword 0b,3b						\n\
	.dword 1b,4b						\n\
	.previous"

	: "=r" (dst), "=r" (n), "=r" (retn)
	: "0" (dst), "1" (n), "2" (retn)
	: "r11");
    }
  while (n >= 16)
    {
      __asm_clear_16 (dst, retn);
      n -= 16;
    }

  while (n >= 4)
    {
      __asm_clear_4 (dst, retn);
      n -= 4;
    }

  switch (n)
    {
    case 0:
      break;
    case 1:
      __asm_clear_1 (dst, retn);
      break;
    case 2:
      __asm_clear_2 (dst, retn);
      break;
    case 3:
      __asm_clear_3 (dst, retn);
      break;
    }

  return retn;
}
EXPORT_SYMBOL(__do_clear_user);