/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Satoshi	:	Reuse IPv6 address on link local only
 *	YOSHIFUJI Satoshi	:	Transparent proxy support
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI,H.@USAGI	:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

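/* Initial sequence numbers and timestamp offsets are derived from the
 * connection 4-tuple plus a per-boot secret (see secure_tcpv6_seq() and
 * secure_tcpv6_ts_off()), so they are hard to predict off-path.
 */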
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

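/* Establish an active connection. Destinations that turn out to be
 * v4-mapped are handed off to tcp_v4_connect() with the af_ops switched
 * to the mapped variants.
 */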
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

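/* Deferred PMTU handler. Runs either directly from tcp_v6_err() or, when
 * the socket was owned by user context, later via the
 * TCP_MTU_REDUCED_DEFERRED flag in tcp_release_cb().
 */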
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

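/* ICMPv6 error handler: map the error onto the owning socket and handle
 * PKT_TOOBIG, redirects, and fatal errors on SYN_SENT/SYN_RECV sockets.
 */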
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		u32 mtu = ntohl(info);

		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (mtu < IPV6_MIN_MTU)
			goto out;

		WRITE_ONCE(tp->mtu_info, mtu);

		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

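/* TCP MD5 signature (RFC 2385) support. Keys for v4-mapped peers are
 * stored with AF_INET, so they share the IPv4 key table.
 */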
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}

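/* Hash the IPv6 pseudo-header and the TCP header (with its checksum field
 * zeroed) into the MD5 transform of the per-CPU pool.
 */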
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

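/* Check the MD5 option on an incoming segment against the expected key.
 * Returns true when the segment must be dropped: option without a key,
 * key without an option, or a signature mismatch.
 */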
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

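/* Fill the IPv6-specific parts of a request_sock from an incoming SYN. */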
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi *fl,
					  struct request_sock *req)
{
	tcp_v6_init_req(req, sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		return NULL;

	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

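/* Build and send a bare ACK or RST in reply to @skb, using the per-netns
 * TCP control socket instead of the (possibly missing) flow owner.
 */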
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 mrst = 0, *topt;
	struct dst_entry *dst;
	__u32 mark = 0;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

#ifdef CONFIG_MPTCP
	if (rst && !key) {
		mrst = mptcp_reset_option(skb);

		if (mrst)
			tot_len += sizeof(__be32);
	}
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

	if (mrst)
		*topt++ = mrst;

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk)
		mark = (sk->sk_state == TCP_TIME_WAIT) ?
			inet_twsk(sk)->tw_mark : sk->sk_mark;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequel check is not required.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose. It can be other, but
		 * we will not accept md5 hash with smaller key.
		 * They are asking for trouble and we may answer with RST.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source),
					    tcp_v6_iif_l3_slave(skb),
					    tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

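/* Compute a SYN cookie and the clamped MSS for an incoming SYN; used by
 * the BPF syncookie helper.
 */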
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	bool found_dup_sk = false;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shame.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

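/* Main IPv6 TCP receive routine: validate the header and checksum, look up
 * the owning socket, then process the segment directly or queue it to the
 * socket backlog.
 */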
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

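/* Early demux: look up an established socket before routing so that its
 * cached input dst can be reused for this packet.
 */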
INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
}

const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sp->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   READ_ONCE(tp->write_seq) - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}


static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

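/* Each /proc/net/tcp6 entry prints the addresses as four %08X words plus
 * the port, followed by state, queue sizes, timer info, uid and inode.
 * For example (illustrative only, ::1 listening on port 22 as shown on a
 * little-endian machine):
 *   0: 00000000000000000000000001000000:0016 00000000000000000000000000000000:0000 0A ...
 */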
static const struct seq_operations tcp6_seq_ops = {
	.show		= tcp6_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.family		= AF_INET6,
};

int __net_init tcp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void tcp6_proc_exit(struct net *net)
{
	remove_proc_entry("tcp6", net->proc_net);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= tcp_bpf_update_proto,
#endif
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;

	ret = mptcpv6_init();
	if (ret)
		goto out_tcpv6_pernet_subsys;

out:
	return ret;

out_tcpv6_pernet_subsys:
	unregister_pernet_subsys(&tcpv6_net_ops);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}