1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/dccp.h>
14#include <linux/skbuff.h>
15
16#include "dccp.h"
17
18
/*
 * Retry limits default to TCP's values (tunable via sysctl):
 *  - request_retries bounds handshake (Request) retransmissions,
 *  - retries1/retries2 bound established-state retransmissions.
 */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1 __read_mostly = TCP_RETR1;
int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;
22
/*
 * Abort the connection after repeated write timeouts: report the error to
 * the socket owner, send a Reset (code "Aborted") to the peer, and move
 * the sock to its terminal state.
 */
static void dccp_write_err(struct sock *sk)
{
	/* Prefer a previously recorded soft error; fall back to ETIMEDOUT. */
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
	dccp_done(sk);
	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
}
32
33
34static int dccp_write_timeout(struct sock *sk)
35{
36 const struct inet_connection_sock *icsk = inet_csk(sk);
37 int retry_until;
38
39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
40 if (icsk->icsk_retransmits != 0)
41 dst_negative_advice(&sk->sk_dst_cache);
42 retry_until = icsk->icsk_syn_retries ?
43 : sysctl_dccp_request_retries;
44 } else {
45 if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66 dst_negative_advice(&sk->sk_dst_cache);
67 }
68
69 retry_until = sysctl_dccp_retries2;
70
71
72
73 }
74
75 if (icsk->icsk_retransmits >= retry_until) {
76
77 dccp_write_err(sk);
78 return 1;
79 }
80 return 0;
81}
82
83
84
85
/*
 * Retransmission timer: give up if the retry limit is reached, otherwise
 * retransmit the pending packet and back the RTO off exponentially.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* dccp_write_timeout() resets the connection once the retry limit
	 * is exceeded; nothing more to do here in that case. */
	if (dccp_write_timeout(sk))
		return;

	/* Count a timeout only on the first attempt, so the MIB reflects
	 * timed-out packets rather than total retransmissions. */
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed (e.g. local congestion): do not back
		 * off, just rearm with a short probe interval.
		 * NOTE(review): the decrement assumes dccp_retransmit_skb()
		 * bumped icsk_retransmits before failing — confirm against
		 * the output path; the ==0 clamp keeps the counter >= 1.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	icsk->icsk_backoff++;

	/* Exponential backoff, capped at DCCP_RTO_MAX. */
	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}
126
/*
 * Write-timer callback (runs in BH context): dispatches the pending
 * icsk timer event, deferring if the socket is owned by process context.
 */
static void dccp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket is busy in process context; try again shortly. */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
		goto out;

	/* Timer fired early (deadline was pushed out); rearm for the
	 * remaining time. */
	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		goto out;
	}

	/* Consume the pending event before handling it. */
	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		dccp_retransmit_timer(sk);
		break;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
162
163
164
165
/*
 * Run the inet request-sock queue pruner on a listening socket,
 * using DCCP's initial timeout and RTO ceiling.
 */
static void dccp_response_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}
171
172static void dccp_keepalive_timer(unsigned long data)
173{
174 struct sock *sk = (struct sock *)data;
175
176
177 bh_lock_sock(sk);
178 if (sock_owned_by_user(sk)) {
179
180 inet_csk_reset_keepalive_timer(sk, HZ / 20);
181 goto out;
182 }
183
184 if (sk->sk_state == DCCP_LISTEN) {
185 dccp_response_timer(sk);
186 goto out;
187 }
188out:
189 bh_unlock_sock(sk);
190 sock_put(sk);
191}
192
193
/*
 * Delayed-ACK timer callback (BH context): sends the scheduled ACK and
 * adjusts the ACK-timeout (ato) state, deferring if the socket is owned
 * by process context.
 */
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Process context owns the socket: mark blocked and retry. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	/* Timer fired early; rearm for the remaining time. */
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ato (capped by RTO). */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and drop
			 * ato back to the minimum. */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
239
240
241static void dccp_write_xmit_timer(unsigned long data)
242{
243 struct sock *sk = (struct sock *)data;
244 struct dccp_sock *dp = dccp_sk(sk);
245
246 bh_lock_sock(sk);
247 if (sock_owned_by_user(sk))
248 sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
249 else
250 dccp_write_xmit(sk, 0);
251 bh_unlock_sock(sk);
252 sock_put(sk);
253}
254
255static void dccp_init_write_xmit_timer(struct sock *sk)
256{
257 struct dccp_sock *dp = dccp_sk(sk);
258
259 setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
260 (unsigned long)sk);
261}
262
/*
 * Set up all transmit-side timers for a DCCP sock: the CCID transmit
 * timer plus the standard inet_csk retransmit/delack/keepalive timers.
 */
void dccp_init_xmit_timers(struct sock *sk)
{
	dccp_init_write_xmit_timer(sk);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}
269
/* Wall-clock reference point for dccp_timestamp(); set once at init. */
static ktime_t dccp_timestamp_seed;
271
272
273
274
275
276
/*
 * dccp_timestamp  -  elapsed time since the timestamp seed, in units of
 * 10 microseconds (the microsecond delta divided by 10, truncated to u32).
 */
u32 dccp_timestamp(void)
{
	s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	/* do_div() divides delta in place (it is a macro, not a call). */
	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);
285
/* Record the current wall clock as the reference for dccp_timestamp(). */
void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}
290