1
2#ifndef __ASM_SH_CHECKSUM_H
3#define __ASM_SH_CHECKSUM_H
4
5
6
7
8
9#include <linux/in6.h>
10
11
12
13
14
15
16
17
18
19
20
21
22
23asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
24
25
26
27
28
29
30
31
32
33asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
34
35#define _HAVE_ARCH_CSUM_AND_COPY
36
37
38
39
40
41
42
43static inline
44__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
45{
46 return csum_partial_copy_generic(src, dst, len);
47}
48
49#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
50static inline
51__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
52{
53 if (!access_ok(src, len))
54 return 0;
55 return csum_partial_copy_generic((__force const void *)src, dst, len);
56}
57
58
59
60
61
/*
 * Fold a 32-bit partial checksum down to the final 16-bit checksum:
 * add the two 16-bit halves (and then fold the carry of that addition
 * back in) and return the one's complement of the result.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("swap.w %0, %1\n\t"	/* %1 = sum with 16-bit halves swapped */
		"extu.w %0, %0\n\t"	/* %0 = low half of sum */
		"extu.w %1, %1\n\t"	/* %1 = high half of sum */
		"add %1, %0\n\t"	/* add the two halves */
		"swap.w %0, %1\n\t"	/* move any carry into the low half... */
		"add %1, %0\n\t"	/* ...and fold it in */
		"not %0, %0\n\t"	/* one's complement */
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum)
		: "t");
	return (__force __sum16)sum;
}
77
78
79
80
81
82
83
84
/*
 * ip_fast_csum - compute the IPv4 header checksum.
 * @iph: pointer to the IP header (32-bit loads are used, so it is
 *       assumed to be 4-byte aligned — TODO confirm at call sites)
 * @ihl: header length in 32-bit words (the two unrolled loads below
 *       require ihl >= 2; real IPv4 headers have ihl >= 5)
 *
 * Sums @ihl 32-bit words with carry (addc) in a dt-driven loop, then
 * folds the 32-bit result to 16 bits with csum_fold().
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__wsum sum;
	unsigned int __dummy0, __dummy1;

	__asm__ __volatile__(
		"mov.l @%1+, %0\n\t"	/* sum = first word */
		"mov.l @%1+, %3\n\t"	/* %3 = second word */
		"add #-2, %2\n\t"	/* two words consumed already */
		"clrt\n\t"
		"1:\t"
		"addc %3, %0\n\t"	/* sum += word (+ carry) */
		"movt %4\n\t"		/* save carry (T bit) in %4 */
		"mov.l @%1+, %3\n\t"	/* fetch next word */
		"dt %2\n\t"		/* --count; T = (count == 0) */
		"bf/s 1b\n\t"
		" cmp/eq #1, %4\n\t"	/* delay slot: restore saved carry into T */
		"addc %3, %0\n\t"	/* add the last word loaded */
		"addc %2, %0"		/* %2 is 0 here: just add the final carry */

		/* Since ihl is constrained to small values it can be
		 * left in a register across the loop. */

		: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
		: "1" (iph), "2" (ihl)
		: "t", "memory");

	return csum_fold(sum);
}
113
/*
 * csum_tcpudp_nofold - add the TCP/UDP pseudo-header (source address,
 * destination address, protocol and length) into @sum, without folding
 * the result down to 16 bits.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
	/*
	 * Shift proto+len into the upper byte lanes so it lines up with
	 * the network-byte-order words it is summed with.
	 */
	unsigned long len_proto = (proto + len) << 8;
#else
	unsigned long len_proto = proto + len;
#endif
	__asm__("clrt\n\t"
		"addc %0, %1\n\t"	/* len_proto += sum */
		"addc %2, %1\n\t"	/* len_proto += daddr (+ carry) */
		"addc %3, %1\n\t"	/* len_proto += saddr (+ carry) */
		"movt %0\n\t"		/* sum = final carry */
		"add %1, %0"		/* sum += accumulated total */
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
		: "t");

	return sum;
}
135
136
137
138
139
140static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
141 __u32 len, __u8 proto,
142 __wsum sum)
143{
144 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
145}
146
147
148
149
150
151static inline __sum16 ip_compute_csum(const void *buff, int len)
152{
153 return csum_fold(csum_partial(buff, len, 0));
154}
155
#define _HAVE_ARCH_IPV6_CSUM
/*
 * csum_ipv6_magic - compute the folded checksum of the IPv6
 * pseudo-header: the four 32-bit words of each of @saddr and @daddr,
 * plus @len and @proto (both converted to network byte order), added
 * with carry into @sum and folded to 16 bits.
 */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	unsigned int __dummy;
	__asm__("clrt\n\t"
		/* add the four words of the source address */
		"mov.l @(0,%2), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(4,%2), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(8,%2), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(12,%2), %1\n\t"
		"addc %1, %0\n\t"
		/* add the four words of the destination address */
		"mov.l @(0,%3), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(4,%3), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(8,%3), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(12,%3), %1\n\t"
		"addc %1, %0\n\t"
		"addc %4, %0\n\t"	/* + htonl(len) */
		"addc %5, %0\n\t"	/* + htonl(proto) */
		"movt %1\n\t"		/* fold the final carry back in */
		"add %1, %0\n"
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "t");

	return csum_fold(sum);
}
190
191
192
193
194#define HAVE_CSUM_COPY_USER
195static inline __wsum csum_and_copy_to_user(const void *src,
196 void __user *dst,
197 int len)
198{
199 if (!access_ok(dst, len))
200 return 0;
201 return csum_partial_copy_generic(src, (__force void *)dst, len);
202}
203#endif
204