/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>

#include "dccp.h"

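/* sysctl variables governing numbers of retransmission attempts */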
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1	__read_mostly = TCP_RETR1;
int sysctl_dccp_retries2	__read_mostly = TCP_RETR2;

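/*
 * Hard failure: report the pending soft error (or ETIMEDOUT), send a Reset
 * with code "Aborted" and take the socket down.
 */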
static void dccp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
	dccp_done(sk);
	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
}

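/* A write timeout has occurred. Process the after effects. */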
static int dccp_write_timeout(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;

	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
		if (icsk->icsk_retransmits != 0)
			dst_negative_advice(&sk->sk_dst_cache);
		/* An icsk_syn_retries of 0 selects the sysctl default */
		retry_until = icsk->icsk_syn_retries ?
			    : sysctl_dccp_request_retries;
	} else {
		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
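			/*
			 * NOTE: draft-ietf-tcpimpl-pmtud-01.txt calls for PMTU
			 * black hole detection here; as in the TCP code this
			 * mirrors, it is not implemented.
			 */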
			dst_negative_advice(&sk->sk_dst_cache);
		}

		retry_until = sysctl_dccp_retries2;
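		/*
		 * FIXME: see tcp_write_timeout and __tcp_retransmit_timer
		 */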
	}

	if (icsk->icsk_retransmits >= retry_until) {
		/* Has it gone just too far? */
		dccp_write_err(sk);
		return 1;
	}
	return 0;
}
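/*
 *	The DCCP retransmit timer.
 */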
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

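	/*
	 * The retransmit timer is also used for feature negotiation: when no
	 * packet awaits retransmission, no data is re-sent; instead, an Ack
	 * is generated carrying the pending feature-negotiation options.
	 */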
	if (sk->sk_send_head == NULL) {
		dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk);
		if (sk->sk_state == DCCP_OPEN)
			dccp_send_ack(sk);
		goto backoff;
	}
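	/*
	 * sk->sk_send_head holds the one skb eligible for retransmission,
	 * with DCCP_SKB_CB(skb)->dccpd_type set to a retransmittable DCCP
	 * packet type: Request (REQUEST state), Ack (PARTOPEN state),
	 * CloseReq (CLOSEREQ state) or Close (CLOSING state).
	 */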
	BUG_TRAP(sk->sk_send_head != NULL);
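	/*
	 * If the retry limit is exceeded, dccp_write_timeout() aborts the
	 * connection (Reset + close), and there is nothing left to do here.
	 */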
	if (dccp_write_timeout(sk))
		goto out;
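	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */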
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk, sk->sk_send_head) < 0) {
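		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 */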
		if (icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		goto out;
	}

backoff:
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

	/* Exponential backoff: double the RTO up to DCCP_RTO_MAX and rearm */
	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
out:;
}

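/* ICSK retransmit-timer callback: dispatches ICSK_TIME_RETRANS when due */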
static void dccp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		/* Not yet due: rearm for the remaining time */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		dccp_retransmit_timer(sk);
		break;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
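/*
 *	Timer for listening sockets
 */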
static void dccp_response_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}

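/* DCCP does not use keepalives; this timer only serves listening sockets */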
static void dccp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ / 20);
		goto out;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_response_timer(sk);
		goto out;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
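/* Delayed-ACK timer, analogous to tcp_delack_timer (sans prequeue handling) */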
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/*
			 * Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

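/* Transmit-delay timer: used by the CCIDs to delay the actual send time */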
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void dccp_init_write_xmit_timer(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	init_timer(&dp->dccps_xmit_timer);
	dp->dccps_xmit_timer.data     = (unsigned long)sk;
	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

void dccp_init_xmit_timers(struct sock *sk)
{
	dccp_init_write_xmit_timer(sk);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}

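/* Seed for dccp_timestamp(), set once at initialisation time */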
static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since the seed was taken. This is
 * the native DCCP time difference format (RFC 4340, sec. 13). Note that, as a
 * u32 counting 10-microsecond units, it wraps around roughly every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	do_div(delta, 10);		/* convert microseconds to 10us units */
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);

void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}