/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
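
/* Cache the (held) input route and its validity cookie on the socket so
 * that later packets of this flow can skip a full routing lookup.
 */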
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
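
/* Initial sequence numbers and timestamp offsets are derived from the
 * address/port four-tuple with a keyed hash, so they are hard to predict
 * for off-path attackers (RFC 6528 style).
 */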
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
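
/* Handle a smaller path MTU: refresh the cached route MTU and, if our MSS
 * estimate shrank, retransmit the data that no longer fits one segment.
 */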
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);

	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a listening socket,
		 * so we drop the request here.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
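
/* Build and transmit a SYN-ACK for request @req; the caller may pass in an
 * already-resolved route via @dst, otherwise one is looked up here.
 */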
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
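
/* TCP MD5 signature support (RFC 2385): keys are stored per peer address
 * and, with TCP_MD5SIG_EXT, per address prefix.
 */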
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}
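
/* Feed the IPv6 pseudo-header plus the TCP header (checksum zeroed) into
 * the MD5 transform; payload and key are hashed separately by the callers.
 */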
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link-local peers have a meaningful destination scope,
	 * remember the arrival interface when the listener is unbound.
	 */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
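
/* Build a bare ACK or RST answering @skb on the per-netns control socket,
 * with addresses and ports swapped; used when no full socket does the
 * talking (resets, time-wait and request-socket ACKs).
 */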
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;
	__u32 mark = 0;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk)
		mark = (sk->sk_state == TCP_TIME_WAIT) ?
			inet_twsk(sk)->tw_mark : sk->sk_mark;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even for a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)	/* never answer a RST with a RST */
		return;

	/* If sk is not NULL, we did a successful lookup and the incoming
	 * route had to be correct; otherwise only reply to unicast
	 * destinations.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/* The active side is lost. Try to find a listening socket
		 * through the source port, then look the MD5 key up via
		 * that socket. We are not losing security here: the
		 * incoming packet is still verified against the MD5 hash
		 * of the matched socket.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source),
					    tcp_v6_iif_l3_slave(skb),
					    tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}
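
/* A non-SYN segment hitting a listener may be the final ACK of a
 * syncookie handshake; cookie_v6_check() then rebuilds the request.
 */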
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be called
	 * again. ip6_datagram_recv_specific_ctl() also expects IP6CB to be
	 * there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
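
/* Create the child socket for a completed handshake. Handles both native
 * IPv6 peers and IPv4 peers reached through a v4-mapped socket.
 */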
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/* No need to charge this sock to the relevant IPv6 refcnt
		 * debug socks count here, tcp_create_openreq_child now does
		 * this for us, see the comment in that function for the gory
		 * details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		 * worked with IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/* No need to charge this sock to the relevant IPv6 refcnt debug
	 * socks count here, tcp_create_openreq_child now does this for us,
	 * see the comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now the IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).
	 * Keeping a reference count would be much more clever, but it
	 * would require one more thing: reattaching optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6, but an IPv4 packet arrives, goes to
	 * the IPv4 receive handler and is backlogged. From the backlog it
	 * always lands here, so divert it back to the IPv4 path, which
	 * handles such segments correctly. --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * This is the only place in the code where option latching can be
	 * done without affecting IPv4; the rest of the code is protocol
	 * independent. For now we latch the options received in the most
	 * recent packet that was enqueued by tcp. --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* The latched clone is consumed only if all of the following hold:
	 *
	 * 1. skb was enqueued by tcp.
	 * 2. skb is added to the tail of the read queue, not out of order.
	 * 3. the socket is not in a passive state.
	 * 4. it actually carries options the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
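
/* Main receive path: validate the header and checksum, look up the owning
 * socket and process the segment directly, or queue it to the backlog
 * when the socket is locked by its owner.
 */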
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
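
/* Early demux on the input fast path: find an established socket before
 * routing, so its cached input route can be reused for this packet.
 */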
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct seq_operations tcp6_seq_ops = {
	.show		= tcp6_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.family		= AF_INET6,
};

int __net_init tcp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void tcp6_proc_exit(struct net *net)
{
	remove_proc_entry("tcp6", net->proc_net);
}
#endif
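
/* Socket-level operations exported to the core stack for AF_INET6 TCP. */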
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux		= tcp_v6_early_demux,
	.early_demux_handler	= tcp_v6_early_demux,
	.handler		= tcp_v6_rcv,
	.err_handler		= tcp_v6_err,
	.flags			= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
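
/* Register the IPv6 TCP receive handler, then expose SOCK_STREAM socket
 * creation, then set up the per-namespace control sockets; unwind in
 * reverse order on failure.
 */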
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}