#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/bitops.h>	/* ror32() */
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>

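/*
 * Generic fallback: copy @len bytes from user space into @dst and
 * checksum them.  Returns the partial checksum, or 0 if the copy faults.
 */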
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len)
{
	if (copy_from_user(dst, src, len))
		return 0;
	return csum_partial(dst, len, ~0U);
}
#endif

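/*
 * Generic fallback: checksum @len bytes at @src, then copy them to
 * user space.  Returns the partial checksum, or 0 if the copy faults.
 */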
#ifndef HAVE_CSUM_COPY_USER
static __always_inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	__wsum sum = csum_partial(src, len, ~0U);

	if (copy_to_user(dst, src, len) == 0)
		return sum;
	return 0;
}
#endif

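/*
 * Generic fallback: copy @len bytes from @src to @dst and return the
 * partial checksum of the copied data.
 */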
#ifndef _HAVE_ARCH_CSUM_AND_COPY
static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, 0);
}
#endif

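/*
 * Add two 32-bit partial checksums in one's-complement arithmetic:
 * any carry out of bit 31 is folded back into bit 0 (end-around carry).
 */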
#ifndef HAVE_ARCH_CSUM_ADD
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;

	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif

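/* One's-complement subtraction: adding ~addend subtracts addend. */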
static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}

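/* 16-bit counterparts of csum_add()/csum_sub() for folded checksums. */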
static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}

static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		return (__force __wsum)ror32((__force u32)sum, 8);
	return sum;
}

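/*
 * Fold @csum2, the checksum of a block starting at @offset, into the
 * running checksum @csum.  An odd offset shifts the block's bytes by
 * one within the 16-bit lanes, which csum_shift() compensates for.
 */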
static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	return csum_add(csum, csum_shift(csum2, offset));
}

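/*
 * As csum_block_add(); @len is ignored and exists only so callers can
 * share one prototype with length-aware variants.
 */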
static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}

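/* Remove a sub-block's checksum @csum2 (at @offset) from @csum. */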
static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}

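/* Widen a folded 16-bit checksum back into a 32-bit partial sum. */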
static __always_inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}

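/* Extended-signature wrapper around csum_partial(). */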
static __always_inline
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}

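/*
 * Transmitted in place of a checksum that folds to zero, since an
 * all-zero field means "no checksum" in protocols such as UDP.
 */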
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

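/* Apply the 32-bit wide difference @diff to the folded checksum @sum. */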
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}

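/*
 * Update the folded checksum @sum after a 32-bit field changed
 * from @from to @to.
 */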
static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}


/* Implements RFC 1624 (Incremental Internet Checksum)
 *  3. Discussion states :
 *      HC' = ~(~HC + ~m + m')
 *  m  : old value of a 16bit field
 *  m' : new value of a 16bit field
 */
static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}

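/* Wide-sum variant: swap @old for @new in the running checksum @csum. */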
static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

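/*
 * skb-aware checksum updates (defined in net/core/utils.c); @pseudohdr
 * says whether the field is also covered by the pseudo-header, which
 * matters for CHECKSUM_PARTIAL packets.
 */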
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);

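/* 16-bit variant, implemented by zero-extending both values to 32 bits. */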
static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
			      __be16 from, __be16 to, bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}

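/*
 * Remote checksum offload: subtract the checksum of the first @start
 * bytes from @csum, write the folded remainder into the 16-bit slot at
 * @ptr + @offset, and return the wide difference against the value it
 * replaced.
 */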
static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
					     int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}

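/* Undo remcsum_adjust(): restore the checksum value that @delta replaced. */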
static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}

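/* Negate a partial checksum (two's complement of the 32-bit value). */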
static __always_inline __wsum wsum_negate(__wsum val)
{
	return (__force __wsum)-((__force u32)val);
}
#endif /* _CHECKSUM_H */