/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

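/* Cache the just-validated input route on the socket so the early-demux
 * fast path can reuse it for subsequent packets on this connection.
 */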
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

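/* Derive the initial sequence number for a passive open from the
 * address/port four-tuple of the incoming SYN.
 */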
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

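/* Active open: validate the destination, route the flow, pick the source
 * address and port, then send the SYN.  IPv4-mapped destinations are
 * handed over to tcp_v4_connect().
 */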
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

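/* Called once an ICMPv6 Packet Too Big has been recorded on the socket:
 * update the cached path MTU and retransmit whatever no longer fits.
 */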
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

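/* ICMPv6 error handler: find the socket the error refers to and react,
 * be it a PMTU update, a redirect, or reporting the error to the user.
 */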
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);

	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

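/* Send a SYN-ACK for a pending request, routing it first if no dst was
 * supplied by the caller.
 */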
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

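/* TCP MD5 signature (RFC 2385) support: key lookup, setsockopt parsing
 * and hash computation over the IPv6 pseudo-header.
 */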
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

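/* Verify the TCP MD5 option on an incoming segment when either side
 * expects one.  Returns true when the packet must be dropped.
 */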
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

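/* Fill in the IPv6-specific fields of a freshly minted request sock. */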
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

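/* Build and transmit a bare ACK or RST on the per-net control socket,
 * echoing the addressing of the packet we are replying to.
 */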
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

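/* Send a RST in response to a segment that has no (usable) socket. */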
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

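/* On a listener, hand a non-SYN segment to the SYN-cookie code; it may
 * resurrect a request and return the new child socket.
 */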
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}

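/* Create the child socket for a completed handshake.  The IPv4-mapped
 * case is delegated to tcp_v4_syn_recv_sock() and then re-dressed as an
 * IPv6 socket.
 */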
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the
		 * comment in that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shame.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user may be
	      interested in...
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

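/* Main receive routine, called for every TCP segment the IPv6 stack hands
 * us: validate the header, find the owning socket and dispatch to it.
 */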
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

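/* Early demux: look up an established socket from softirq context before
 * the full receive path runs, so its cached dst can be attached to the skb.
 */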
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= tcp_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}