/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on linux/net/ipv4/tcp*.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

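/* Cache the incoming route on the socket, together with the routing-table
 * generation count as a validity cookie, so that later packets for this
 * flow can skip a full route lookup.
 */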
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}

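/* Hash the socket into the TCP lookup tables; sockets that ended up
 * IPv4-mapped are handed to the IPv4 hash routine instead.
 */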
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

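/* Derive the initial sequence number for a passive open from the
 * address/port 4-tuple of the incoming SYN.
 */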
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

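/* Active open: validate the destination, route the flow, pick the source
 * address and initial sequence number, then send the SYN. IPv4-mapped
 * destinations are diverted to tcp_v4_connect().
 */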
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

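/* PMTU reduction: re-sync the MSS to the new path MTU and retransmit
 * outstanding data if our cached MSS turned out to be too large.
 */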
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

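/* ICMPv6 error handler: locate the socket the error refers to and react
 * to redirects, packet-too-big notifications and hard errors.
 */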
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* For a Fast Open socket, snd_una is the request's ISN */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

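/* Build and transmit a SYN-ACK for the given connection request,
 * routing it if no destination entry was supplied.
 */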
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

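/* Parse a TCP_MD5SIG setsockopt request and add or delete the key;
 * v4-mapped peers are stored as AF_INET keys.
 */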
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

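/* Feed the IPv6 pseudo-header (addresses, protocol, length) into the
 * MD5 hash in progress.
 */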
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

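/* Verify the TCP MD5 signature option on an incoming segment against the
 * key configured for the peer; returns non-zero if the segment must be
 * dropped.
 */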
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}

#endif

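/* Record the IPv6 addresses, incoming interface and relevant packet
 * options from the SYN in the connection request.
 */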
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

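/* Build and send a bare ACK or RST segment in response to @skb, routed
 * and transmitted via the per-netns control socket.
 */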
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow; the underlying
	 * function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

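/* Send a RST in response to @skb, signed with the MD5 key configured for
 * the peer (if any) so that a signing peer will accept the reset.
 */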
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, we did a successful lookup and the incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key, and no RST is sent if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

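/* On a listening socket, match an incoming segment against pending
 * connection requests and established sockets, falling back to
 * syncookie validation for bare ACKs.
 */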
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}

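/* Create the child socket for a completed handshake; v4-mapped requests
 * are built by tcp_v4_syn_recv_sock() and then converted for IPv6 use.
 */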
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/* No need to charge this sock to the relevant IPv6 refcnt
		 * debug socks count here, tcp_create_openreq_child now does
		 * this for us, see the comment in that function for the gory
		 * details.
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/* No need to charge this sock to the relevant IPv6 refcnt debug
	 * socks count here, tcp_create_openreq_child now does this for us,
	 * see the comment in that function for the gory details.
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shame.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6 and an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here, so divert such
	   packets back to the IPv4 path.
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   This is the only place in our code where we may make it
	   not affecting IPv4; the rest of the code is protocol
	   independent. We latch the options received in the last
	   packet enqueued by tcp.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Latch packet options only when:

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, not out of order.
	   3. the socket is not in a passive state.
	   4. it really contains options the user may wish to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(); barrier() makes sure the compiler won't play
	 * aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

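/* Main receive path: validate the segment, look up the owning socket and
 * either process it directly, prequeue it, or backlog it.
 */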
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

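/* Early demux: look up an established socket before routing so the
 * cached rx dst can be attached to the skb.
 */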
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

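/* Zero the socket on allocation while preserving pinet6, which must
 * survive for lockless (RCU) lookups.
 */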
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}