/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
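
/* Editor's note: each sysctl_tcp_* knob above is exported to userspace under
 * /proc/sys/net/ipv4/ (e.g. sysctl_tcp_syn_retries is net.ipv4.tcp_syn_retries).
 * Assuming the usual definitions in include/net/tcp.h, the compile-time
 * defaults are TCP_SYN_RETRIES = 6, TCP_SYNACK_RETRIES = 5,
 * TCP_KEEPALIVE_TIME = 7200*HZ, TCP_KEEPALIVE_PROBES = 9,
 * TCP_KEEPALIVE_INTVL = 75*HZ, TCP_RETR1 = 3 and TCP_RETR2 = 15;
 * tcp_orphan_retries and tcp_thin_linear_timeouts deliberately default to 0.
 */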

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
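
/* Editor's note on the 'shift' argument, a hedged reading of the logic above
 * rather than a statement of tcp_check_oom()'s full policy: the orphan check
 * left-shifts the current orphan count before comparing it against
 * tcp_max_orphans, so each of the two penalties above effectively halves the
 * number of orphans this host will tolerate before declaring out-of-memory.
 */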

/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec.
	 */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
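
/* Editor's worked example for the '8' above, assuming the usual
 * TCP_RTO_MIN of 200 ms: retransmits_timed_out() sums the exponential
 * backoff series, so a boundary of 8 costs roughly
 * ((2 << 8) - 1) * 200 ms = 511 * 200 ms = 102.2 s, i.e. just over the
 * 100 seconds that RFC 1122 (4.2.3.5) recommends as a minimum before
 * aborting a connection.
 */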

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
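
/* Editor's sketch of the search_low update above, assuming the historical
 * default sysctl_tcp_base_mss of 512: if the current search floor maps to an
 * MSS of 1400, the halved probe MSS is 700, which min() clamps down to 512,
 * while max() keeps it above the 68-byte minimum IPv4 MTU less the TCP
 * header. Repeated timeouts thus walk the PMTU estimate downward until
 * segments start getting through the black hole.
 */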

/* This function calculates a "timeout" which is equivalent to the timeout of
 * a TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}
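
/* Editor's worked example, assuming the usual TCP_RTO_MIN = 200 ms and
 * TCP_RTO_MAX = 120 s: linear_backoff_thresh = ilog2(120000/200) = 9, so for
 * the default tcp_retries2 boundary of 15 the computed timeout is
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s = 924.6 s,
 * the familiar ~15.5 minutes after which a stalled connection is aborted.
 */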

/* A write timeout has occurred. Process the after-effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true);
			if (tp->syn_data)
				NET_INC_STATS_BH(sock_net(sk),
						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
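
/* Editor's note on the SYN path above, assuming the usual
 * TCP_TIMEOUT_INIT of 1 s and the default tcp_syn_retries of 6:
 * retransmits_timed_out() yields ((2 << 6) - 1) * 1 s = 127 s, so an
 * unanswered connect() gives up after roughly two minutes of
 * 1, 2, 4, ... second SYN retransmissions.
 */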

void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}
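
/* Editor's sketch of the ATO inflation above, assuming the usual
 * TCP_ATO_MIN of HZ/25 (40 ms at HZ=1000): each expiry in non-pingpong mode
 * doubles the delayed-ACK timeout, 40 ms -> 80 ms -> 160 ms -> ..., clamped
 * at the connection's current RTO, so a quiet receiver backs off its ACK
 * clock rather than firing a stale timer at the minimum rate forever.
 */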

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* Delegate our work to tcp_release_cb(). */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support
	 * this by default and reset icsk_probes_out with incoming ACKs. But
	 * if the socket is orphaned or the user specifies TCP_USER_TIMEOUT,
	 * we kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy
	 * when we use RTO to probe the window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
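
/* Editor's note: this is the zero-window probe (persist) path. With the
 * defaults assumed in the examples above, probes follow the exponentially
 * backed-off RTO schedule, and an established, attended socket is never
 * killed as long as the peer keeps acknowledging them; only orphans,
 * TCP_USER_TIMEOUT users, and peers that stay silent for tcp_retries2
 * consecutive probes are timed out.
 */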

/*
 *	Timer for Fast Open sockets to retransmit the SYN-ACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(sk, req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike a regular SYN-ACK retransmit, we ignore the error
	 * returned from rtx_syn_ack() to make it more persistent, like a
	 * regular retransmit: if the child socket has been accepted, it is
	 * not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
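
/* Editor's note: the rearm above gives the usual exponential SYN-ACK
 * schedule. Assuming TCP_TIMEOUT_INIT = 1 s, successive retransmissions
 * fire after 2 s, 4 s, 8 s, ... (TCP_TIMEOUT_INIT << num_timeout, capped at
 * TCP_RTO_MAX), for up to tcp_synack_retries + 1 attempts on a Fast Open
 * child socket.
 */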

/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive an ACK to our SYN-ACK, don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not time out this
		 * connection. If the socket is an orphan, time it out;
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff'
	 * is used to reset the timer, set it to 0 and recalculate 'icsk_rto',
	 * as both might have been inflated if the stream oscillates between
	 * thin and thick. Otherwise, fall back to the usual exponential
	 * backoff, doubling 'icsk_rto' up to TCP_RTO_MAX.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}
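
/* Editor's sketch of the thin-stream special case above, assuming the usual
 * TCP_THIN_LINEAR_RETRIES of 6: a thin stream (fewer than four packets in
 * flight) keeps a freshly computed RTO for its first six retransmissions
 * instead of doubling it, so an interactive flow retries at roughly constant
 * intervals before the normal exponential backoff takes over.
 */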

void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* Delegate our work to tcp_release_cb(). */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to time out instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		     elapsed >= icsk->icsk_user_timeout &&
		     icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp). */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
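
/* Editor's worked example with the compile-time defaults assumed earlier
 * (TCP_KEEPALIVE_TIME = 7200 s, TCP_KEEPALIVE_INTVL = 75 s,
 * TCP_KEEPALIVE_PROBES = 9): an idle connection is first probed after two
 * hours, then every 75 s, and is reset after 9 unanswered probes, i.e.
 * roughly 7200 + 9 * 75 = 7875 s after the last activity.
 */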

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);