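/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 */
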
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo	= &tcp_hashinfo,
};
EXPORT_SYMBOL_GPL(tcp_death_row);

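/* Check whether the segment [seq, end_seq) overlaps the receive window
 * [s_win, e_win).  A zero-length segment sitting exactly on either window
 * edge is also acceptable.
 */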
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

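/*
 * Process an incoming segment for a socket in TIME-WAIT (or FIN-WAIT-2)
 * state.  The main purpose of TIME-WAIT is to let the connection close
 * gracefully while old duplicate segments drain from the network, so the
 * checks below decide whether to silently drop the segment, answer it with
 * an ACK or RST, kill the timewait bucket early, or (for an acceptable new
 * SYN) let the caller create a fresh connection via TCP_TW_SYN.
 */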
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_reschedule(tw, tw->tw_timeout);
		else
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
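
	/*
	 * Now we are in the real TIME-WAIT state.
	 *
	 * RFC 1122 allows a TIME-WAIT socket to accept a new SYN and reopen
	 * the connection directly, provided the new incarnation picks an
	 * initial sequence number larger than any sequence number used on
	 * the previous incarnation (handled further below via TCP_TW_SYN).
	 */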
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
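
	/* Out of window segment.
	 *
	 * All out-of-window segments are ACKed immediately.  The only
	 * exception is a new SYN: we accept it if its sequence number (or,
	 * with PAWS, its timestamp) proves that it cannot be an old
	 * duplicate, and hand it back to the caller as TCP_TW_SYN so that a
	 * new connection can be established straight from TIME-WAIT.
	 */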
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
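
/*
 * Move a socket to TIME-WAIT (or FIN-WAIT-2 with a timewait bucket):
 * allocate and populate an inet_timewait_sock from sk, hash it in place of
 * sk, arm its timer, and close the original socket.
 */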
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, &tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not carry the key DB from the
		 * full socket, so take a copy of the MD5 key in use (if any)
		 * for the timewait ACK generating code.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
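
/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */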
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
	int full_space = tcp_full_space(sk_listener);
	int mss = dst_metric_advmss(dst);
	u32 window_clamp;
	__u8 rcv_wscale;

	if (user_mss && user_mss < mss)
		mss = user_mss;

	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
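
/* Create a new socket for a connection that has completed the three-way
 * handshake: clone the listener, then overwrite the fields that must not be
 * inherited with values taken from the request_sock and the incoming
 * segment.  Shared by the IPv4 and IPv6 SYN-recv paths.
 */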
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		newtp->rtt_min[0].rtt = ~0U;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count
		 * the initial SYN frame in their delayed-ACK and congestion
		 * control algorithms that we must have the following bandaid
		 * to talk efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		tcp_init_xmit_timers(newsk);
		__skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp.v64 = 0;
		newtp->rack.advanced = 0;

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
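
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock.  Normally sk is the listener socket, but for TCP Fast Open
 * (fastopen == true) it points to the already-created child socket.
 *
 * Returns the child socket once the handshake completes, sk when the
 * listener itself should handle the segment (e.g. to send a reset for an
 * invalid ACK), or NULL when the segment has been consumed or dropped.
 */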
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required: it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/* Our SYN-ACK was apparently lost: retransmit it (subject to
		 * out-of-window rate limiting) and update the SYN-ACK timer
		 * expiry rather than dropping the request.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
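
	/* RFC 793 ("SEGMENT ARRIVES", SYN-RECEIVED state): an incoming ACK
	 * that does not acknowledge our SYN-ACK exactly (snt_isn + 1) is
	 * unacceptable, so hand the segment back to the listener, which will
	 * send a reset.  For a Fast Open socket the ACK is validated against
	 * the child socket elsewhere, because user data may already have
	 * been sent, so the check is skipped here.
	 */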
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* RFC 793: "first check sequence number".
	 *
	 * Out-of-window segments (and PAWS failures) are answered with an
	 * ACK unless they carry RST.
	 */
	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC 793: "second check the RST bit" and
	 *	    "fourth, check the SYN bit".
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK is not set, silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create the full socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
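
/*
 * Queue the segment on the new (child) socket if it is currently owned by
 * the user, otherwise process it immediately with tcp_rcv_state_process().
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV on
 * entry; when that state changes (the handshake completes) the listener is
 * woken via sk_data_ready() so accept() can pick up the new connection.
 */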
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);