/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */
#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/export.h>

#include "dccp.h"

/* sysctl variables governing numbers of retransmission attempts */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1	__read_mostly = TCP_RETR1;
int sysctl_dccp_retries2	__read_mostly = TCP_RETR2;
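
/*
 * These thresholds are runtime-tunable: net/dccp/sysctl.c exposes them as
 * sysctls (net.dccp.default.retries1 etc. in mainline trees, with defaults
 * taken from the TCP constants above, e.g. TCP_RETR2 == 15), for example:
 *
 *	# sysctl net.dccp.default.retries2
 *	net.dccp.default.retries2 = 15
 *	# sysctl -w net.dccp.default.retries2=8
 */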

static void dccp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
	dccp_done(sk);
	__DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
}

/* A write timeout has occurred. Check for retransmit errors. */
static int dccp_write_timeout(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;

	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
		if (icsk->icsk_retransmits != 0)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ?
			    : sysctl_dccp_request_retries;
	} else {
		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
			/*
			 * Retransmissions have crossed the retries1
			 * threshold without an answer: tell the routing
			 * layer that the cached route looks broken, so a
			 * fresh one is looked up before the next attempt.
			 * (TCP additionally performs PMTU "black hole"
			 * probing at this point.)
			 */
			dst_negative_advice(sk);
		}

		retry_until = sysctl_dccp_retries2;
	}

	if (icsk->icsk_retransmits >= retry_until) {
		/* Retry limit exceeded: abort the connection. */
		dccp_write_err(sk);
		return 1;
	}
	return 0;
}

/*
 *	The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * If the retry limit has been reached, dccp_write_timeout() has
	 * already sent a Reset and reported the error on the socket, so
	 * there is nothing left to retransmit.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		__DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	icsk->icsk_backoff++;

	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}
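
/*
 * Rough backoff sketch, assuming the initial RTO is DCCP_TIMEOUT_INIT
 * (3 s in mainline) and DCCP_RTO_MAX is 64 s, with no successful
 * (re)transmission in between:
 *
 *	retransmit #:	1   2   3    4    5    6    7  ...
 *	RTO (s):	3   6   12   24   48   64   64 ... (capped)
 *
 * With the default retries2 of 15 this adds up to
 * 3 + 6 + 12 + 24 + 48 + 10 * 64 = 733 s, i.e. roughly 12-13 minutes of
 * failed retransmissions (plus one final RTO before the limit check) until
 * dccp_write_err() aborts the connection. Local-congestion retries are
 * excluded, since the branch above does not let them bump icsk_retransmits.
 */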

static void dccp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;
	int event = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		dccp_retransmit_timer(sk);
		break;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
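
/*
 * The HZ / 20 above defers the retransmit handling by roughly 50 ms
 * whenever the socket is locked by a process context, since the timer
 * must not modify connection state while the owner holds the lock.
 */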

static void dccp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	pr_err("dccp should not use a keepalive timer!\n");
	sock_put(sk);
}

/* Delayed-ACK timer, modelled on tcp_delack_timer() */
static void dccp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/*
			 * Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
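
/*
 * ATO dynamics sketch, assuming TCP_ATO_MIN is HZ / 25 (40 ms in mainline):
 * outside of pingpong mode each expiry with an ACK still scheduled doubles
 * the ACK timeout (40, 80, 160 ms, ...) up to the current RTO, whereas a
 * missed delayed ACK in pingpong (interactive) mode leaves that mode and
 * resets the timeout to the 40 ms minimum before the ACK is sent.
 */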

/*
 * Transmit tasklet / timer: the TX CCIDs use these to delay or resume
 * packet transmission via dccp_write_xmit().
 */
static void dccp_write_xmitlet(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void dccp_write_xmit_timer(struct timer_list *t)
{
	struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);
	struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;

	dccp_write_xmitlet((unsigned long)sk);
}

void dccp_init_xmit_timers(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
	timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}
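
/*
 * For context, a sketch of how these hooks are driven (see
 * net/dccp/output.c): when the TX CCID asks for a send delay,
 * dccp_write_xmit() arms the timer along the lines of
 *
 *	sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer,
 *		       jiffies + msecs_to_jiffies(delay));
 *
 * and the CCID schedules dccps_xmitlet once acknowledgements open the send
 * window again, which re-enters dccp_write_xmit() via the tasklet above.
 */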

static ktime_t dccp_timestamp_seed;

/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is the
 * native DCCP time difference format (RFC 4340, sec. 13).
 * Please note: this wraps around roughly every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);
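
/*
 * Wrap-around arithmetic behind the 11.9 h figure: the counter advances
 * once per 10 us, so a u32 spans 2^32 * 10 us ~= 42950 s ~= 11.93 hours;
 * differences of two samples therefore stay meaningful across a single
 * wrap when computed in modulo-2^32 (u32) arithmetic.
 */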

void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}