1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/init.h>
15#include <linux/highmem.h>
16
17
18
19
/*
 * Faraday optimised copy of one page.
 *
 * Copies PAGE_SIZE bytes from @kfrom to @kto in 32-byte chunks (two
 * 16-byte ldm/stm pairs per loop iteration, hence PAGE_SIZE / 32 loop
 * counts).  After each 16-byte store the destination D-cache line is
 * cleaned and invalidated (cp15 c7, c14, 1) so the copied data reaches
 * memory; the final mcr (c7, c10, 4) drains the write buffer.  Note the
 * last mcr deliberately reuses %2, which the loop has counted down to
 * zero — the required operand value for drain-write-buffer.
 *
 * r3, r4, ip and lr are used as bulk-transfer scratch registers and are
 * declared clobbered.  kto/kfrom advance in place via the "+&r"
 * read-write constraints.
 */
static void fa_copy_user_page(void *kto, const void *kfrom)
{
 int tmp;

 asm volatile ("\
1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\
 stmia %0, {r3, r4, ip, lr} @ 4\n\
 mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
 add %0, %0, #16 @ 1\n\
 ldmia %1!, {r3, r4, ip, lr} @ 4\n\
 stmia %0, {r3, r4, ip, lr} @ 4\n\
 mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
 add %0, %0, #16 @ 1\n\
 subs %2, %2, #1 @ 1\n\
 bne 1b @ 1\n\
 mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB"
 : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
 : "2" (PAGE_SIZE / 32)
 : "r3", "r4", "ip", "lr");
}
40
/*
 * Copy a user highpage: map both pages with atomic kmaps, hand the
 * kernel virtual addresses to the asm copy primitive, then drop the
 * mappings in reverse order of acquisition (required for atomic kmaps).
 *
 * @vaddr and @vma are part of the cpu_copy_user_highpage interface but
 * are not needed by this CPU's implementation.
 */
void fa_copy_user_highpage(struct page *to, struct page *from,
 unsigned long vaddr, struct vm_area_struct *vma)
{
 void *dst_va;
 void *src_va;

 dst_va = kmap_atomic(to);
 src_va = kmap_atomic(from);
 fa_copy_user_page(dst_va, src_va);
 kunmap_atomic(src_va);
 kunmap_atomic(dst_va);
}
52
53
54
55
56
57
/*
 * Faraday optimised clear of one user highpage.
 *
 * Maps the page with an atomic kmap, then zeroes PAGE_SIZE bytes in
 * 32-byte chunks: r2, r3, ip and lr are preloaded with zero and stored
 * as two 16-byte stm bursts per loop iteration (hence the PAGE_SIZE / 32
 * loop count in r1).  Each 16-byte line is cleaned and invalidated from
 * the D-cache (cp15 c7, c14, 1) so the zeros reach memory.  The final
 * mcr (c7, c10, 4) drains the write buffer — r1 is reused here because
 * the loop has counted it down to zero, the required operand value.
 *
 * The "I" constraint makes PAGE_SIZE / 32 an immediate for the mov at
 * the top; %0 starts at kaddr ("0" ties it to the output) and advances
 * through the page in place.
 */
void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
{
 void *ptr, *kaddr = kmap_atomic(page);
 asm volatile("\
 mov r1, %2 @ 1\n\
 mov r2, #0 @ 1\n\
 mov r3, #0 @ 1\n\
 mov ip, #0 @ 1\n\
 mov lr, #0 @ 1\n\
1: stmia %0, {r2, r3, ip, lr} @ 4\n\
 mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
 add %0, %0, #16 @ 1\n\
 stmia %0, {r2, r3, ip, lr} @ 4\n\
 mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
 add %0, %0, #16 @ 1\n\
 subs r1, r1, #1 @ 1\n\
 bne 1b @ 1\n\
 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB"
 : "=r" (ptr)
 : "0" (kaddr), "I" (PAGE_SIZE / 32)
 : "r1", "r2", "r3", "ip", "lr");
 kunmap_atomic(kaddr);
}
81
82struct cpu_user_fns fa_user_fns __initdata = {
83 .cpu_clear_user_highpage = fa_clear_user_highpage,
84 .cpu_copy_user_highpage = fa_copy_user_highpage,
85};
86