/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

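/* Abort the connection: report any pending soft error (ETIMEDOUT if none),
 * wake the application via sk_error_report(), and close the socket.
 */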
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP spec, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero-window probe timeout occurs on an orphaned socket.
 *
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it.
	 */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when the connection requires reset:
		 *   1. the last segment was sent recently, or
		 */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /* 2. the window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec.
	 */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

/* This function calculates a "timeout" which is equivalent to the timeout
 * of a TCP connection after "boundary" unsuccessful, exponentially
 * backed-off retransmissions with an initial RTO of TCP_RTO_MIN, or
 * TCP_TIMEOUT_INIT if the syn_set flag is set.
 */
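/* Example, assuming the usual TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s:
 * linear_backoff_thresh = ilog2(120000/200) = 9, so boundary = 8 yields
 * timeout = ((2 << 8) - 1) * 200 ms ~= 102 s of pure exponential backoff,
 * while boundary = 15 yields ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s
 * ~= 924 s: exponential up to the threshold, then linear in TCP_RTO_MAX.
 */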
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	if (unlikely(!tcp_sk(sk)->retrans_stamp))
		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
	else
		start_ts = tcp_sk(sk)->retrans_stamp;

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

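/* Called when the delayed-ACK timer has fired: either directly from
 * tcp_delack_timer() when the socket is not owned by the user, or later
 * from tcp_release_cb() when the timer had to defer to the lock holder.
 */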
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* Delegate our work to tcp_release_cb(). */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

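/* The zero-window probe timer: the peer has advertised a zero receive
 * window and we still have unsent data queued, so keep probing until the
 * window reopens or the connection is judged dead.
 */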
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* *WARNING* RFC 1122 forbids this.
	 *
	 * It doesn't AFAIK, because we kill the retransmit timer -AK
	 *
	 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
	 * this behaviour in Solaris down as a bug fix. [AC]
	 *
	 * Let me explain. icsk_probes_out is zeroed by incoming ACKs even
	 * if they advertise a zero window. Hence, the connection is killed
	 * only if we received no ACKs for the normal connection timeout; it
	 * is not killed merely because the window stays zero for some time.
	 * The window may be zero until armageddon and even later. We are in
	 * full accordance with the RFCs; only the probe timer combines both
	 * retransmission timeout and probe timeout in one bottle.	--ANK
	 */
	max_probes = sysctl_tcp_retries2;

	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for the Fast Open socket to retransmit the SYN-ACK. Note that
 *	the sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(sk, req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike the regular SYN-ACK retransmit, we ignore the
	 * error returned from rtx_syn_ack() to make it more persistent,
	 * like the regular retransmit timer.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->early_retrans_delayed) {
		tcp_resume_early_retransmit(sk);
		return;
	}
	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive an ACK to our SYN-ACK, don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits become
		 * zero-window probes, but we should not time out this
		 * connection. If the socket is an orphan, time it out;
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &inet->inet_daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &np->daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	if (tcp_use_frto(sk)) {
		tcp_enter_frto(sk);
	} else {
		tcp_enter_loss(sk, 0);
	}

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion;
		 * do not back off.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts instead of exponential
	 * backoff for the first TCP_THIN_LINEAR_RETRIES retransmissions, so
	 * that thin, time-critical streams recover lost segments faster.
	 * Since 'icsk_backoff' is used to reset the timer, set it to 0.
	 * Recalculate 'icsk_rto' from the current RTT estimate, as the old
	 * backed-off value might otherwise be used to reset the timer,
	 * preventing it from ever starting cleanly.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff. */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

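/* Dispatch the pending write-timer event: retransmission timeout or
 * zero-window probe. Called from tcp_write_timer() when the socket is not
 * owned by the user, or from tcp_release_cb() when the timer deferred.
 */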
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* Delegate our work to tcp_release_cb(). */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets.
 */
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

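/* Called when the application toggles SO_KEEPALIVE: arm the keepalive
 * timer when keepalives are switched on for a connected socket, and
 * delete it when they are switched off.
 */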
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}


static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if the socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to time out instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If the keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);