#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
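
/* checksum.h: IP/UDP/TCP checksum routines for sparc64. */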
#include <linux/in6.h>
#include <asm/uaccess.h>
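
/* Compute a 32-bit partial checksum of the memory block at buff,
 * length len, and add it into "sum".  The returned value is suitable
 * for feeding back into csum_partial() or into csum_fold() /
 * csum_tcpudp_magic().
 */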
__wsum csum_partial(const void *buff, int len, __wsum sum);
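
/* Like csum_partial(), but copy the data from src to dst while
 * checksumming; no access checks are performed on either pointer.
 */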
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum);
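
/* Copy len bytes from user space at src to dst and checksum them;
 * returns the running checksum, or a negative value if a fault occurred.
 */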
long __csum_partial_copy_from_user(const void __user *src,
				   void *dst, int len,
				   __wsum sum);
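
/* Wrapper that reports a fault through *err (set to -EFAULT) instead
 * of a negative return value.
 */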
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
			    void *dst, int len,
			    __wsum sum, int *err)
{
	long ret = __csum_partial_copy_from_user(src, dst, len, sum);
	if (ret < 0)
		*err = -EFAULT;
	return (__force __wsum) ret;
}
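
/*
 * Copy and checksum to user space.
 */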
#define HAVE_CSUM_COPY_USER
long __csum_partial_copy_to_user(const void *src,
				 void __user *dst, int len,
				 __wsum sum);
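
/* As with the from-user variant, a fault is reported through *err. */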
static inline __wsum
csum_and_copy_to_user(const void *src,
		      void __user *dst, int len,
		      __wsum sum, int *err)
{
	long ret = __csum_partial_copy_to_user(src, dst, len, sum);
	if (ret < 0)
		*err = -EFAULT;
	return (__force __wsum) ret;
}
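
/* Checksum an IPv4 header: iph points at the header, ihl is its length
 * in 32-bit words (always >= 5, usually exactly 5).
 */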
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
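
/* Fold a 32-bit partial checksum down to 16 bits and complement it. */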
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	__asm__ __volatile__(
"	addcc	%0, %1, %1\n"
"	srl	%1, 16, %1\n"
"	addc	%1, %%g0, %1\n"
"	xnor	%%g0, %1, %0\n"
	: "=&r" (sum), "=r" (tmp)
	: "0" (sum), "1" ((__force u32)sum << 16)
	: "cc");
	return (__force __sum16)sum;
}
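
/* Accumulate the IPv4 pseudo-header (addresses, protocol and length)
 * into a 32-bit checksum without folding it.
 */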
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__ __volatile__(
"	addcc	%1, %0, %0\n"
"	addccc	%2, %0, %0\n"
"	addccc	%3, %0, %0\n"
"	addc	%0, %%g0, %0\n"
	: "=r" (sum), "=r" (saddr)
	: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
	: "cc");
	return sum;
}
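
/* Compute the checksum of the TCP/UDP pseudo-header; returns a 16-bit
 * checksum, already complemented.
 */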
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

#define _HAVE_ARCH_IPV6_CSUM
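
/* IPv6 version of the pseudo-header checksum: sums both 128-bit
 * addresses along with length and protocol, then folds the result.
 */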
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	__asm__ __volatile__ (
"	addcc	%3, %4, %%g7\n"
"	addccc	%5, %%g7, %%g7\n"
"	lduw	[%2 + 0x0c], %%g2\n"
"	lduw	[%2 + 0x08], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%2 + 0x04], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%2 + 0x00], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%1 + 0x0c], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%1 + 0x08], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%1 + 0x04], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%1 + 0x00], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	addccc	%%g3, %%g7, %0\n"
"	addc	0, %0, %0\n"
	: "=&r" (sum)
	: "r" (saddr), "r" (daddr), "r" (htonl(len)),
	  "r" (htonl(proto)), "r" (sum)
	: "g2", "g3", "g7", "cc");

	return csum_fold(sum);
}
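
/* Compute the 16-bit one's-complement checksum of a plain buffer,
 * as used for miscellaneous IP-style checksums.
 */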
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
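
/* Add two 32-bit checksums, folding the carry back in (end-around carry). */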
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	__asm__ __volatile__(
		"addcc %0, %1, %0\n"
		"addx %0, %%g0, %0"
		: "=r" (csum)
		: "r" (addend), "0" (csum));

	return csum;
}

#endif /* !(__SPARC64_CHECKSUM_H) */