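/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP):
 *		timers for retransmission, delayed ACKs, zero-window probes,
 *		keepalive, and SYN-ACK retransmission.
 */
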
#include <linux/module.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;

static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer(unsigned long data);

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}

EXPORT_SYMBOL(tcp_init_xmit_timers);

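/* Report a timeout error to the user and tear the connection down.
 * A pending soft error (e.g. one noted from an ICMP message) takes
 * precedence over the generic ETIMEDOUT.
 */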
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

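/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */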
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int orphans = percpu_counter_read_positive(&tcp_orphan_count);

	/* If peer does not open window for a long time, or did not
	 * transmit anything for a long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2 * TCP_RTO_MAX || !do_reset)
		orphans <<= 1;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		orphans <<= 1;

	if (tcp_too_many_orphans(sk, orphans)) {
		if (net_ratelimit())
			printk(KERN_INFO "Out of socket memory\n");

		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

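/* Calculate the maximal number of retries on an orphaned socket. */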
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP clamp that something went wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some
	 * safe number of retries. 8 corresponds to >100 seconds with
	 * a minimal RTO of 200 msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

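/* A retransmission timeout may mean we are hitting an MTU black hole;
 * enable MTU probing and lower the MSS search floor so that smaller
 * segments can get through (packetization-layer path MTU discovery).
 */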
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

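/* A write timeout has occurred. Process the after effects. */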
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
	bool do_reset;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(&sk->sk_dst_cache);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(&sk->sk_dst_cache);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

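/* Delayed ACK timer: an ACK we postponed is now overdue. If the socket
 * is busy, retry shortly; otherwise flush the prequeue and send the
 * pending ACK.
 */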
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
		goto out_unlock;
	}

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
	TCP_CHECK_TIMER(sk);

out:
	if (tcp_memory_pressure)
		sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

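/* Zero-window probe timer: the peer advertised a zero window and has
 * not acknowledged our window probes.
 */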
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* icsk_probes_out is zeroed by incoming ACKs even if they
	 * advertise a zero window, so the connection is killed only if
	 * we received no ACKs for the normal connection timeout. It is
	 * not killed merely because the window stays zero for some time;
	 * the window may stay zero indefinitely. This is in accordance
	 * with the RFCs: the probe timer simply combines the
	 * retransmission timeout and the probe timeout in one mechanism.
	 */
	max_probes = sysctl_tcp_retries2;

	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

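/*
 *	The TCP retransmit timer.
 */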
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
#ifdef TCP_DEBUG
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
				       &inet->daddr, ntohs(inet->dport),
				       inet->num, tp->snd_una, tp->snd_nxt);
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
				       &np->daddr, ntohs(inet->dport),
				       inet->num, tp->snd_una, tp->snd_nxt);
		}
#endif
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Disorder) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	if (tcp_use_frto(sk)) {
		tcp_enter_frto(sk);
	} else {
		tcp_enter_loss(sk, 0);
	}

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
		__sk_dst_reset(sk);

out:;
}

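/* Common handler for the retransmit and zero-window-probe timers:
 * defer if the socket is locked by the user, otherwise dispatch on
 * the pending event.
 */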
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
		goto out_unlock;
	}

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}
	TCP_CHECK_TIMER(sk);

out:
	sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

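/*
 *	Timer for listening sockets.
 */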
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

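/* Arm or disarm the keepalive timer when SO_KEEPALIVE is toggled on an
 * established socket.
 */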
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

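/* The keepalive timer does double duty: for listening sockets it drives
 * SYN-ACK retransmission, and for orphaned sockets it enforces the
 * FIN_WAIT2 timeout before the regular keepalive logic runs.
 */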
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 elapsed;

	/* Only process if the socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = tcp_time_stamp - tp->rcv_tstamp;

	if (elapsed >= keepalive_time_when(tp)) {
		if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp). */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	TCP_CHECK_TIMER(sk);
	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}