#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
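
/*
 * Checksum routines for the SuperH (sh) architecture, implemented
 * largely as inline assembly for speed.
 */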
#include <linux/in6.h>
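
/*
 * Compute the checksum of a memory block at buff, length len, and add
 * in "sum" (32-bit). The result is a 32-bit value suitable for feeding
 * back into this function or into csum_tcpudp_magic().
 *
 * This function must be called with even lengths, except for the last
 * fragment, which may be odd; it is best to have buff aligned on a
 * 32-bit boundary.
 */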
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
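
/*
 * The same as csum_partial(), but copies from src while it checksums,
 * and handles user-space pointer exceptions through the error pointers
 * when needed. Here it is even more important to align src and dst on
 * a 32-bit (or, even better, 64-bit) boundary.
 */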
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
					    int len, __wsum sum,
					    int *src_err_ptr, int *dst_err_ptr);
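
/*
 * Note: a NULL pointer exception here means someone passed an
 * incorrect kernel address to one of these functions. If you use
 * them directly, please don't forget the access_ok() check.
 */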
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
				   int len, __wsum sum, int *err_ptr)
{
	return csum_partial_copy_generic((__force const void *)src, dst,
					 len, sum, err_ptr, NULL);
}
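
/*
 * Fold a 32-bit partial checksum down to 16 bits: add the two
 * halfwords, add the carry back in, and return the one's complement.
 */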
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("swap.w	%0, %1\n\t"	/* %1 = sum with halfwords swapped */
		"extu.w	%0, %0\n\t"	/* %0 = low halfword */
		"extu.w	%1, %1\n\t"	/* %1 = high halfword */
		"add	%1, %0\n\t"	/* up to 17-bit sum of both halves */
		"swap.w	%0, %1\n\t"
		"add	%1, %0\n\t"	/* fold the carry into the low half */
		"not	%0, %0\n\t"	/* one's complement */
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum)
		: "t");
	return (__force __sum16)sum;
}
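
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4-octet boundaries. ihl is the header
 * length in 32-bit words and must be >= 5, since an IPv4 header is
 * at least 20 bytes.
 */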
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, __dummy0, __dummy1;

	__asm__ __volatile__(
		"mov.l	@%1+, %0\n\t"	/* sum = first word */
		"mov.l	@%1+, %3\n\t"
		"add	#-2, %2\n\t"	/* two of the ihl words already loaded */
		"clrt\n\t"
		"1:\t"
		"addc	%3, %0\n\t"
		"movt	%4\n\t"		/* save the carry across the loop */
		"mov.l	@%1+, %3\n\t"
		"dt	%2\n\t"
		"bf/s	1b\n\t"
		" cmp/eq #1, %4\n\t"	/* delay slot: restore the carry to T */
		"addc	%3, %0\n\t"
		"addc	%2, %0"		/* here %2 is 0, add the carry bit */
	/*
	 * Since the input registers which are loaded with iph and ihl
	 * are modified, we must also specify them as outputs, or gcc
	 * will assume they contain their original values.
	 */
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
	: "1" (iph), "2" (ihl)
	: "t", "memory");

	return csum_fold(sum);
}
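
/*
 * Typical use (sketch, not from this file): validating a received
 * IPv4 header, where a fold result of 0 means the checksum is good:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 */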
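
/*
 * Sum the IPv4 pseudo-header (source and destination address, protocol
 * and length) into "sum" without folding. On little-endian parts the
 * combined len+proto term is shifted by 8 so that its bytes line up
 * with the big-endian pseudo-header layout.
 */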
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
	unsigned long len_proto = (proto + len) << 8;
#else
	unsigned long len_proto = proto + len;
#endif
	__asm__("clrt\n\t"
		"addc	%0, %1\n\t"	/* len_proto += sum */
		"addc	%2, %1\n\t"	/* len_proto += daddr */
		"addc	%3, %1\n\t"	/* len_proto += saddr */
		"movt	%0\n\t"		/* %0 = final carry */
		"add	%1, %0"
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
		: "t");

	return sum;
}
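
/*
 * Compute the checksum of the TCP/UDP pseudo-header; returns a 16-bit
 * checksum, already complemented.
 */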
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
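
/*
 * Typical use (sketch, not from this file): a UDP transmit path sums
 * the datagram with csum_partial() and then folds in the pseudo-header:
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen,
 *				      IPPROTO_UDP, csum);
 */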
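
/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */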
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
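/*
 * Sum the IPv6 pseudo-header (both 16-byte addresses, length and
 * protocol) and fold the result, using addc to chain the carries
 * through all eight address words.
 */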
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, unsigned short proto,
				      __wsum sum)
{
	unsigned int __dummy;
	__asm__("clrt\n\t"
		"mov.l	@(0,%2), %1\n\t"	/* four words of saddr */
		"addc	%1, %0\n\t"
		"mov.l	@(4,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(8,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(12,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(0,%3), %1\n\t"	/* four words of daddr */
		"addc	%1, %0\n\t"
		"mov.l	@(4,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(8,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(12,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"addc	%4, %0\n\t"		/* length, network order */
		"addc	%5, %0\n\t"		/* protocol, network order */
		"movt	%1\n\t"
		"add	%1, %0\n"		/* add the final carry */
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "t");

	return csum_fold(sum);
}
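
/*
 * Copy and checksum to user space. If the destination is not
 * writable, the error is reported through err_ptr and an invalid
 * checksum of -1 is returned.
 */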
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
					   void __user *dst,
					   int len, __wsum sum,
					   int *err_ptr)
{
	if (access_ok(VERIFY_WRITE, dst, len))
		return csum_partial_copy_generic((__force const void *)src,
						 dst, len, sum, NULL, err_ptr);

	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1;	/* invalid checksum */
}
#endif /* __ASM_SH_CHECKSUM_H */