// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

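/* Check whether the segment [seq, end_seq] overlaps the receive window
 * [s_win, e_win). Zero-length segments sitting exactly at either edge
 * of the window are acceptable as well.
 */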
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

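/*
 * Main purpose of TIME-WAIT state is to close connection gracefully,
 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
 * (and, probably, tail of data) and one or more our ACKs are lost.
 *
 * The TIME-WAIT timeout is chosen to exceed the maximal retransmission
 * timeout by enough to tolerate the loss of one (or more) segments;
 * it is not merely a bound on the lifetime of old duplicate segments.
 *
 * tcp_timewait_state_process() handles a segment that arrives for a
 * timewait bucket (substate FIN-WAIT-2 or TIME-WAIT) and tells the
 * caller what to do with it:
 *
 *	TCP_TW_SUCCESS	segment was consumed/dropped, nothing to send
 *	TCP_TW_ACK	send an ACK back to the peer
 *	TCP_TW_RST	send a RST back to the peer
 *	TCP_TW_SYN	acceptable new SYN; reopen the connection with
 *			the initial sequence number stored in
 *			TCP_SKB_CB(skb)->tcp_tw_isn
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 */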
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
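
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */
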
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
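
	/*	Out of window segment.
	 *
	 *	All the segments are ACKed immediately.
	 *
	 *	The only exception is new SYN. We accept it, if it is
	 *	not old duplicate and we are not in danger to be killed
	 *	by delayed old duplicates. RFC check is that it has
	 *	newer sequence number works at rates <40Mbit/sec.
	 *	However, if paws works, it is reliable AND even more,
	 *	we even may relax silly seq space cutoff.
	 *
	 *	RED-PEN: we violate main RFC requirement: if this SYN later
	 *	turns out to be an old duplicate (i.e. we receive RST in
	 *	reply to our SYN-ACK), we ought to return the socket to
	 *	TIME-WAIT state, which we do not do.
	 */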
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

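/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */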
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_mark = sk->sk_mark;
		tw->tw_priority = sk->sk_priority;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay = tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			tcptw->tw_md5_key = NULL;
			if (static_branch_unlikely(&tcp_md5_needed)) {
				struct tcp_md5sig_key *key;

				key = tp->af_specific->md5_lookup(sk, sk);
				if (key) {
					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
				}
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
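
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */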
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* Choose the initial receive window and scale to advertise. */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}
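
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of memory writes here: tp of the
 * listening socket contains all the necessary default parameters.
 */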
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;
	if (newtp->af_specific->md5_lookup(sk, newsk))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
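
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO (Fast Open)
 * request sockets sk is the "full" socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */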
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
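
	/* Further reproduces section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 *
	 * Crossed SYNs (a malicious third party sending identical SYNs,
	 * and hence identical sequence numbers, to both ends) could make
	 * the ACK test below pass, the sequence test pass, and the SYN be
	 * truncated, so the segment would be treated as a bare ACK and an
	 * established connection would be created on both ends. The case
	 * is harmless and exceedingly rare, but we still defend against
	 * invalid ACKs, as per RFC793, page 36.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */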
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would be not so bad idea to check rcv_tsecr, which
	 * is essentially ACK extension and too early or too late values
	 * should cause reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * parent socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(sk, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
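
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */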
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);