1
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
4
5
6
7
8
#include <linux/in6.h>
10
11
12
13
14
15
16
17
18
19
20
21
22
23asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
24
25
26
27
28
29
30
31
32
/*
 * Copy @len bytes from @src to @dst while accumulating the checksum into
 * @sum.  Faults on the source/destination side are reported through
 * @src_err_ptr/@dst_err_ptr respectively; callers pass NULL for a side
 * that needs no fault handling (see the wrappers below).  Implemented in
 * assembly elsewhere in the arch.
 */
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
					    int len, __wsum sum,
					    int *src_err_ptr, int *dst_err_ptr);
36
37
38
39
40
41
42
43
44static inline
45__wsum csum_partial_copy_nocheck(const void *src, void *dst,
46 int len, __wsum sum)
47{
48 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
49}
50
51static inline
52__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
53 int len, __wsum sum, int *err_ptr)
54{
55 return csum_partial_copy_generic((__force const void *)src, dst,
56 len, sum, err_ptr, NULL);
57}
58
59
60
61
62
/*
 * Fold a 32-bit partial checksum down to 16 bits and take its
 * ones'-complement, producing the final value to store in a protocol
 * header.  Clobbers the T flag.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("swap.w %0, %1\n\t"	/* %1 = sum with 16-bit halves swapped */
		"extu.w %0, %0\n\t"	/* %0 = low halfword of sum */
		"extu.w %1, %1\n\t"	/* %1 = high halfword of sum */
		"add %1, %0\n\t"	/* add the two halves */
		"swap.w %0, %1\n\t"	/* bring any carry above bit 15 ... */
		"add %1, %0\n\t"	/* ... back down and add it in */
		"not %0, %0\n\t"	/* ones'-complement the result */
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum)
		: "t");
	/* Only the low 16 bits are meaningful after the fold. */
	return (__force __sum16)sum;
}
78
79
80
81
82
83
84
85
/*
 * Checksum an IPv4 header: @iph points at the header, @ihl is its length
 * in 32-bit words.  Sums the words with carry propagation (addc), then
 * folds via csum_fold().
 *
 * NOTE(review): the loop pre-reads two words and iterates ihl-2 more
 * times, so it assumes ihl >= 2 (in practice a valid header has
 * ihl >= 5) — confirm callers guarantee this.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, __dummy0, __dummy1;

	__asm__ __volatile__(
		"mov.l @%1+, %0\n\t"	/* sum = first word */
		"mov.l @%1+, %3\n\t"	/* prefetch second word */
		"add #-2, %2\n\t"	/* two words already consumed */
		"clrt\n\t"
		"1:\t"
		"addc %3, %0\n\t"	/* sum += word + carry */
		"movt %4\n\t"		/* save carry: dt below clobbers T */
		"mov.l @%1+, %3\n\t"	/* fetch next word */
		"dt %2\n\t"		/* --count; T = (count == 0) */
		"bf/s 1b\n\t"		/* loop while count != 0 ... */
		" cmp/eq #1, %4\n\t"	/* (delay slot) restore saved carry into T */
		"addc %3, %0\n\t"	/* add the final word */
		"addc %2, %0"		/* %2 is zero here: fold in last carry */

		/* iph/ihl are consumed in place, hence the matching
		 * input/output constraints below. */

	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
	: "1" (iph), "2" (ihl)
	: "t", "memory");

	return csum_fold(sum);
}
113
/*
 * Accumulate the TCP/UDP pseudo-header (source address, destination
 * address, length, protocol) into the 32-bit partial checksum @sum,
 * without folding.  Clobbers the T flag.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
	/* On little-endian, shift proto+len so it lines up with the
	 * byte positions of the big-endian pseudo-header word. */
	unsigned long len_proto = (proto + len) << 8;
#else
	unsigned long len_proto = proto + len;
#endif
	__asm__("clrt\n\t"
		"addc %0, %1\n\t"	/* len_proto += sum */
		"addc %2, %1\n\t"	/* += daddr (with carry) */
		"addc %3, %1\n\t"	/* += saddr (with carry) */
		"movt %0\n\t"		/* capture the final carry */
		"add %1, %0"		/* sum = total + carry */
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
		: "t");

	return sum;
}
135
136
137
138
139
140static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
141 __u32 len, __u8 proto,
142 __wsum sum)
143{
144 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
145}
146
147
148
149
150
151static inline __sum16 ip_compute_csum(const void *buff, int len)
152{
153 return csum_fold(csum_partial(buff, len, 0));
154}
155
#define _HAVE_ARCH_IPV6_CSUM
/*
 * Compute the folded checksum of the IPv6 pseudo-header: both 128-bit
 * addresses (four 32-bit words each), plus length and protocol in
 * network byte order, accumulated over @sum with carry propagation.
 * Clobbers the T flag.
 */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	unsigned int __dummy;
	__asm__("clrt\n\t"
		/* Four words of the source address. */
		"mov.l @(0,%2), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(4,%2), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(8,%2), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(12,%2), %1\n\t"
		"addc %1, %0\n\t"
		/* Four words of the destination address. */
		"mov.l @(0,%3), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(4,%3), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(8,%3), %1\n\t"
		"addc %1, %0\n\t"
		"mov.l @(12,%3), %1\n\t"
		"addc %1, %0\n\t"
		/* Length and protocol (already htonl'd below). */
		"addc %4, %0\n\t"
		"addc %5, %0\n\t"
		/* Fold the final carry back into the sum. */
		"movt %1\n\t"
		"add %1, %0\n"
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "t");

	return csum_fold(sum);
}
190
191
192
193
#define HAVE_CSUM_COPY_USER
195static inline __wsum csum_and_copy_to_user(const void *src,
196 void __user *dst,
197 int len, __wsum sum,
198 int *err_ptr)
199{
200 if (access_ok(dst, len))
201 return csum_partial_copy_generic((__force const void *)src,
202 dst, len, sum, NULL, err_ptr);
203
204 if (len)
205 *err_ptr = -EFAULT;
206
207 return (__force __wsum)-1;
208}
#endif /* __ASM_SH_CHECKSUM_H */
210