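/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		This file implements the TCP timers: retransmission,
 *		delayed ACK, zero window probe, keepalive, and the
 *		Fast Open SYN-ACK retransmission timer.
 */
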
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_thin_linear_timeouts __read_mostly;
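
/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */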
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
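
/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to the current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is direct violation of TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Criteria is still not confirmed experimentally and may change.
 *  We kill the socket, if:
 *  1. If number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. If we have strong memory pressure.
 */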
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
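
/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */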
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			/* Lower the MSS search floor: halve the MSS derived
			 * from the current search_low, clamped between the
			 * configured base MSS and the minimum IPv4 MTU (68)
			 * minus the TCP header length.
			 */
			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
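
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0 the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *  @syn_set:  true if the SYN bit was set.
 *
 *  The default "timeout" value this function can calculate and use
 *  is equivalent to the timeout of a TCP connection
 *  after "boundary" unsuccessful, exponentially backed-off
 *  retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT
 *  if the syn_set flag is set.
 */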
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		/* While the RTO doubles, "boundary" backed-off retransmits
		 * take ((2 << boundary) - 1) * rto_base in total; once the
		 * RTO is clamped at TCP_RTO_MAX, each further retransmission
		 * adds a full TCP_RTO_MAX.
		 */
		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}
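
/* A write timeout has occurred. Process the after effects. */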
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts after
			 * successful Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
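
/* Called with BH disabled */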
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}
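
/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @data:  Pointer to the current socket. (gets casted to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP
 *  packet of this socket expires. Calls tcp_delack_timer_handler() to do
 *  the actual work.
 */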
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* Delegate our work to tcp_release_cb(). */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
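
/* Zero window probe timer: fires while the peer keeps its receive window
 * closed; aborts the connection if the probes go unanswered for too long.
 */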
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond probes. We support this by
	 * default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceeds the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
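
/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */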
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
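
/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */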
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * else the exponential backoff is used. Linear timeouts apply only
	 * while the connection is established and for at most
	 * TCP_THIN_LINEAR_RETRIES retransmissions.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}
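
/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer().
 */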
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}
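
/* Timer callback entry point for the write-side timers: runs the handler
 * directly, or defers the work to tcp_release_cb() if the socket is owned
 * by user context.
 */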
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* Delegate our work to tcp_release_cb(). */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
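
/* Keepalive timer: also handles the FIN_WAIT2 timeout for dead sockets,
 * and sends keepalive probes on otherwise idle connections.
 */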
static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}