/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer (unsigned long data);

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is direct violation of TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on orphaned socket.
 *
 * Criteria is still not confirmed experimentally and may change.
 * We kill the socket, if:
 * 1. If number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. If we have strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_too_many_orphans(sk, shift)) {
		if (net_ratelimit())
			printk(KERN_INFO "Out of socket memory\n");

		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
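
/* A rough reading of the check above: tcp_too_many_orphans() compares
 * (number of orphans << shift) against sysctl_tcp_max_orphans (and also
 * fires under TCP memory pressure), so each penalty point halves the
 * orphan budget this socket may consume. A long-idle orphan that also
 * saw a soft ICMP error (shift == 2) is reaped at a quarter of the
 * configured limit.
 */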

/* Calculate maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
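
/* Sanity-checking that figure against retransmits_timed_out() below:
 * with boundary == 8 and the 200 msec minimum RTO, the computed
 * timeout is ((2 << 8) - 1) * 200 msec = 511 * 200 msec = 102.2 sec,
 * indeed just over 100 seconds.
 */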

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
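
/* A worked example, assuming plain IPv4 with no TCP options and the
 * default sysctl_tcp_base_mss of 512: if search_low corresponds to an
 * MTU of 1500, tcp_mtu_to_mss() yields 1460; halving gives 730, which
 * min() clamps to 512, so the new search_low becomes the MTU for a
 * 512-byte MSS (552 bytes) and path MTU search restarts from there.
 */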

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
 * syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	if (unlikely(!tcp_sk(sk)->retrans_stamp))
		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
	else
		start_ts = tcp_sk(sk)->retrans_stamp;

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}
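
/* With the usual HZ-independent constants TCP_RTO_MIN = 200 msec and
 * TCP_RTO_MAX = 120 sec this yields the familiar figures. For
 * boundary = sysctl_tcp_retries2 = 15:
 *	linear_backoff_thresh = ilog2(120 / 0.2) = ilog2(600) = 9
 *	timeout = ((2 << 9) - 1) * 0.2 + (15 - 9) * 120
 *	        = 204.6 + 720 = 924.6 sec,
 * i.e. the connection is given up roughly 15.4 minutes after the first
 * retransmission.
 */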

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
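
/* With the compiled-in defaults (TCP_RETR1 = 3, TCP_RETR2 = 15) this
 * means: after about three unanswered retransmissions we re-validate
 * the route and may start MTU black hole probing, and after fifteen we
 * abort the connection, comfortably above the 100 second minimum that
 * RFC 1122 (4.2.3.5) requires for the R2 threshold.
 */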

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
		goto out_unlock;
	}

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_memory_pressure)
		sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}
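
/* Note the asymmetry above: for one-way traffic a missed delayed ACK
 * doubles the ACK timeout (capped by the RTO), while in interactive
 * pingpong mode we give up on delaying and fall back to the minimum
 * ATO, i.e. quickack-style behaviour.
 */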

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* *WARNING* RFC 1122 forbids this
	 *
	 * It doesn't AFAIK, because we kill the retransmit timer -AK
	 *
	 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
	 * this behaviour in Solaris down as a bug fix. [AC]
	 *
	 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
	 * even if they advertise zero window. Hence, the connection is
	 * killed only if we received no ACKs for the normal connection
	 * timeout. It is not killed merely because the window stays zero
	 * for some time; the window may be zero until armageddon and even
	 * later. We are in full accordance with the RFCs; only the probe
	 * timer combines both retransmission timeout and probe timeout
	 * in one bottle.					--ANK
	 */
	max_probes = sysctl_tcp_retries2;

	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
#ifdef TCP_DEBUG
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
				       &inet->inet_daddr, ntohs(inet->inet_dport),
				       inet->inet_num, tp->snd_una, tp->snd_nxt);
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
				       &np->daddr, ntohs(inet->inet_dport),
				       inet->inet_num, tp->snd_una, tp->snd_nxt);
		}
#endif
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	if (tcp_use_frto(sk)) {
		tcp_enter_frto(sk);
	} else {
		tcp_enter_loss(sk, 0);
	}

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts: for the first
	 * TCP_THIN_LINEAR_RETRIES retransmissions keep the RTO at its
	 * measured value instead of backing off exponentially. Since
	 * icsk_backoff is also consulted when the timer is re-armed from
	 * ACK processing, clear it here and recompute icsk_rto from the
	 * current srtt/rttvar estimates. Beyond TCP_THIN_LINEAR_RETRIES,
	 * or for ordinary streams, fall back to exponential backoff.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}
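
/* The resulting retransmission schedule, for illustration with a
 * measured RTO of 0.5 sec and no intervening ACKs: retransmissions
 * fire after 0.5, 1, 2, 4, 8, 16, 32, 64, 120, 120, ... seconds, the
 * doubling being clamped at TCP_RTO_MAX (120 sec). With thin-stream
 * linear timeouts enabled, the first TCP_THIN_LINEAR_RETRIES (6)
 * attempts instead reuse the freshly recomputed RTO unchanged.
 */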

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
		goto out_unlock;
	}

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}


static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
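
/* Putting the defaults together (TCP_KEEPALIVE_TIME = 7200 sec,
 * TCP_KEEPALIVE_INTVL = 75 sec, TCP_KEEPALIVE_PROBES = 9): a peer that
 * goes silent on an otherwise idle connection is declared dead after
 * 7200 + 9 * 75 = 7875 seconds, roughly 2 hours and 11 minutes.
 */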