#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
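
/*
 * IP, UDP and TCP checksum routines for 32-bit SPARC.
 */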
#include <linux/in6.h>
#include <linux/uaccess.h>
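
/*
 * Compute the checksum of a memory block at buff, of length len,
 * adding in the 32-bit value sum.  The result is suitable for feeding
 * back into csum_partial() or into csum_tcpudp_magic().
 */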
__wsum csum_partial(const void *buff, int len, __wsum sum);
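
/*
 * The same as csum_partial(), but copies from src while it
 * checksums.  The assembly helper expects the length in %g1 and
 * returns the running sum in %o0, so it is reached via the wrapper
 * below rather than called directly.
 */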
unsigned int __csum_partial_copy_sparc_generic(const unsigned char *, unsigned char *);

static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
        register unsigned int ret asm("o0") = (unsigned int)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;

        __asm__ __volatile__ (
                "call __csum_partial_copy_sparc_generic\n\t"
                " mov -1, %%g7\n"       /* executes in the call's delay slot */
        : "=&r" (ret), "=&r" (d), "=&r" (l)
        : "0" (ret), "1" (d), "2" (l)
        : "o2", "o3", "o4", "o5", "o7",
          "g2", "g3", "g4", "g5", "g7",
          "memory", "cc");
        return (__force __wsum)ret;
}

static inline __wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
        if (unlikely(!access_ok(src, len)))
                return 0;
        return csum_partial_copy_nocheck((__force void *)src, dst, len);
}

static inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
        if (!access_ok(dst, len))
                return 0;
        return csum_partial_copy_nocheck(src, (__force void *)dst, len);
}
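
/*
 * ihl is always 5 or greater (the minimum IPv4 header is 20 bytes),
 * almost always exactly 5, and iph is word-aligned most of the time.
 */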
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        __sum16 sum;
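
        /*
         * Note: the asm below must read %2 (ihl) before it first
         * writes %0 (sum), because GCC may legitimately use the same
         * register for both operands.
         */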
        __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
                             "ld\t[%1 + 0x00], %0\n\t"
                             "ld\t[%1 + 0x04], %%g2\n\t"
                             "ld\t[%1 + 0x08], %%g3\n\t"
                             "addcc\t%%g2, %0, %0\n\t"
                             "addxcc\t%%g3, %0, %0\n\t"
                             "ld\t[%1 + 0x0c], %%g2\n\t"
                             "ld\t[%1 + 0x10], %%g3\n\t"
                             "addxcc\t%%g2, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n"
                             "1:\taddcc\t%%g3, %0, %0\n\t"
                             "add\t%1, 4, %1\n\t"
                             "addxcc\t%0, %%g0, %0\n\t"
                             "subcc\t%%g4, 1, %%g4\n\t"
                             "be,a\t2f\n\t"
                             "sll\t%0, 16, %%g2\n\t"
                             "b\t1b\n\t"
                             "ld\t[%1 + 0x10], %%g3\n"
                             "2:\taddcc\t%0, %%g2, %%g2\n\t"
                             "srl\t%%g2, 16, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             "xnor\t%%g0, %0, %0"
                             : "=r" (sum), "=&r" (iph)
                             : "r" (ihl), "1" (iph)
                             : "g2", "g3", "g4", "cc", "memory");
        return sum;
}
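
/* Fold a partial checksum without adding pseudo headers. */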
static inline __sum16 csum_fold(__wsum sum)
{
        unsigned int tmp;

        __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
                             "srl\t%1, 16, %1\n\t"
                             "addx\t%1, %%g0, %1\n\t"
                             "xnor\t%%g0, %1, %0"
                             : "=&r" (sum), "=r" (tmp)
                             : "0" (sum), "1" ((__force u32)sum<<16)
                             : "cc");
        return (__force __sum16)sum;
}

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
                             "addxcc\t%2, %0, %0\n\t"
                             "addxcc\t%3, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             : "=r" (sum), "=r" (saddr)
                             : "r" (daddr), "r" (proto + len), "0" (sum),
                               "1" (saddr)
                             : "cc");
        return sum;
}
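
/*
 * Compute the checksum of the TCP/UDP pseudo-header; returns a
 * 16-bit checksum, already complemented.
 */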
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

#define _HAVE_ARCH_IPV6_CSUM
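
/*
 * Sum the IPv6 pseudo-header (source and destination addresses,
 * length and protocol) into sum, then fold it to a complemented
 * 16-bit result.
 */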
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                      const struct in6_addr *daddr,
                                      __u32 len, __u8 proto, __wsum sum)
{
        __asm__ __volatile__ (
                "addcc  %3, %4, %%g4\n\t"
                "addxcc %5, %%g4, %%g4\n\t"
                "ld     [%2 + 0x0c], %%g2\n\t"
                "ld     [%2 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%2 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%2 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x0c], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "addxcc %%g3, %%g4, %0\n\t"
                "addx   0, %0, %0\n"
                : "=&r" (sum)
                : "r" (saddr), "r" (daddr),
                  "r" (htonl(len)), "r" (htonl(proto)), "r" (sum)
                : "g2", "g3", "g4", "cc");

        return csum_fold(sum);
}
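
/*
 * Used for miscellaneous IP-like checksums, mainly in icmp.c.
 */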
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD
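/* Add two 32-bit checksums, wrapping the carry back in (end-around carry). */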
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
        __asm__ __volatile__(
                "addcc %0, %1, %0\n"
                "addx %0, %%g0, %0"
                : "=r" (csum)
                : "r" (addend), "0" (csum)
                : "cc");        /* addcc/addx modify the condition codes */

        return csum;
}

#endif /* __SPARC_CHECKSUM_H */