1
2
3
4
5
6
7
8
9#include <linux/dccp.h>
10#include <linux/skbuff.h>
11#include <linux/export.h>
12
13#include "dccp.h"
14
15
/* Retransmission limits; defaults borrow TCP's well-tested values. */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1 __read_mostly = TCP_RETR1;	/* route re-check threshold */
int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;	/* hard-abort threshold */
19
20static void dccp_write_err(struct sock *sk)
21{
22 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
23 sk_error_report(sk);
24
25 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
26 dccp_done(sk);
27 __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
28}
29
30
/*
 * Check whether this socket has used up its retransmission budget.
 *
 * Returns 1 after aborting the connection (via dccp_write_err()) once
 * icsk_retransmits reaches the state-dependent limit; returns 0 if
 * another retransmission attempt is still allowed.
 */
static int dccp_write_timeout(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;

	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
		/*
		 * Still handshaking: any failed retransmit already asks the
		 * routing layer for a better path. The limit is the socket's
		 * own icsk_syn_retries if set, else the sysctl default
		 * (GNU "?:" elides the middle operand).
		 */
		if (icsk->icsk_retransmits != 0)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ?
		    : sysctl_dccp_request_retries;
	} else {
		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
			/*
			 * NOTE(review): this is the point where TCP performs
			 * "black hole" (PMTU) detection once retries1 is
			 * exceeded; here only the route is negatively
			 * advised — confirm that omitting MSS lowering is
			 * intentional for DCCP.
			 */
			dst_negative_advice(sk);
		}

		retry_until = sysctl_dccp_retries2;
		/*
		 * NOTE(review): no orphan/resource pressure check here,
		 * unlike TCP's write timeout — presumably deliberate;
		 * verify against the TCP counterpart.
		 */
	}

	if (icsk->icsk_retransmits >= retry_until) {
		/* Has it gone just too far? Abort the connection. */
		dccp_write_err(sk);
		return 1;
	}
	return 0;
}
79
80
81
82
/*
 * Retransmission timer body: retransmit the pending packet with
 * exponential backoff, or give up once dccp_write_timeout() says the
 * retry budget is spent.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * If the retry limit was reached, dccp_write_timeout() has already
	 * reset and closed the connection — nothing more to do here.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * Count a timeout only on the first retransmission of a packet,
	 * so the MIB reflects packets timed out rather than the total
	 * number of retransmission attempts.
	 */
	if (icsk->icsk_retransmits == 0)
		__DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed (local congestion): do not back
		 * off; retry soon, bounded by the resource probe interval.
		 * NOTE(review): the decrement presumably undoes an
		 * increment done inside dccp_retransmit_skb(), clamped so
		 * the counter stays >= 1 — confirm against that helper.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	icsk->icsk_backoff++;

	/* Exponential backoff of the RTO, capped at DCCP_RTO_MAX. */
	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	/* Past retries1: drop the cached route so the next try re-routes. */
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}
123
124static void dccp_write_timer(struct timer_list *t)
125{
126 struct inet_connection_sock *icsk =
127 from_timer(icsk, t, icsk_retransmit_timer);
128 struct sock *sk = &icsk->icsk_inet.sk;
129 int event = 0;
130
131 bh_lock_sock(sk);
132 if (sock_owned_by_user(sk)) {
133
134 sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
135 jiffies + (HZ / 20));
136 goto out;
137 }
138
139 if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
140 goto out;
141
142 if (time_after(icsk->icsk_timeout, jiffies)) {
143 sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
144 icsk->icsk_timeout);
145 goto out;
146 }
147
148 event = icsk->icsk_pending;
149 icsk->icsk_pending = 0;
150
151 switch (event) {
152 case ICSK_TIME_RETRANS:
153 dccp_retransmit_timer(sk);
154 break;
155 }
156out:
157 bh_unlock_sock(sk);
158 sock_put(sk);
159}
160
/* DCCP sockets must never arm sk_timer; loudly flag it if one fires. */
static void dccp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	pr_err("dccp should not use a keepalive timer !\n");
	sock_put(sk);	/* drop the reference the timer held on the sock */
}
168
169
/*
 * Delayed-ACK timer: send the ACK that was postponed, or reschedule if
 * the socket is currently locked by process context or the timer fired
 * early.
 */
static void dccp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket busy: count the collision and retry shortly. */
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	/* Nothing to do if closed or no delayed-ACK timer is pending. */
	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		/* Fired early — rearm for the remaining time. */
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate the ACK timeout,
			 * bounded by the current RTO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/*
			 * Delayed ACK missed while in pingpong mode: leave
			 * it and fall back to the minimum ACK timeout.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
215
216
217
218
219
220
221
222static void dccp_write_xmitlet(struct tasklet_struct *t)
223{
224 struct dccp_sock *dp = from_tasklet(dp, t, dccps_xmitlet);
225 struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
226
227 bh_lock_sock(sk);
228 if (sock_owned_by_user(sk))
229 sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
230 else
231 dccp_write_xmit(sk);
232 bh_unlock_sock(sk);
233 sock_put(sk);
234}
235
/* Timer shim: the xmit timer simply re-runs the xmit tasklet body. */
static void dccp_write_xmit_timer(struct timer_list *t)
{
	struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);

	dccp_write_xmitlet(&dp->dccps_xmitlet);
}
242
243void dccp_init_xmit_timers(struct sock *sk)
244{
245 struct dccp_sock *dp = dccp_sk(sk);
246
247 tasklet_setup(&dp->dccps_xmitlet, dccp_write_xmitlet);
248 timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0);
249 inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
250 &dccp_keepalive_timer);
251}
252
static ktime_t dccp_timestamp_seed;	/* reference wall-clock time, set at init */

/**
 * dccp_timestamp  -  time source in units of 10 microseconds
 *
 * Returns the elapsed real time since dccp_timestamp_seed was recorded,
 * divided down to 10us units. NOTE(review): presumably the native DCCP
 * timestamp granularity (RFC 4340) — confirm against the option code.
 */
u32 dccp_timestamp(void)
{
	u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	/* do_div() divides in place: microseconds -> 10us units. */
	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);
268
/* Record the reference point against which dccp_timestamp() measures. */
void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}
273