/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

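/* Derive the initial sequence number for a new connection from the
 * IPv6 address/port 4-tuple of the flow (see secure_seq.c).
 */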
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

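/* Validate the destination, resolve an IPv6 route and source address,
 * bind a local port and send the initial SYN.  IPv4-mapped destinations
 * are handed off to tcp_v4_connect().
 */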
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

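/* Called once an ICMPv6 "packet too big" for this connection has been
 * accepted: update the cached path MTU, shrink the MSS accordingly and
 * retransmit the segments that no longer fit.
 */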
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

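/* ICMPv6 error handler: look up the socket the error refers to and
 * react according to its state (PMTU updates, redirects, hard errors).
 */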
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a listening socket, so
		 * for v6 the code below is not needed.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

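/* setsockopt(TCP_MD5SIG) handler: add or delete an MD5 signature key
 * for a peer address; v4-mapped addresses are stored as AF_INET keys.
 */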
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

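/* Feed the TCP-over-IPv6 pseudo-header (addresses, protocol, length)
 * into the MD5 hash state, as required for RFC 2385 signatures.
 */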
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

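/* Verify an inbound segment against the expected MD5 signature, if any.
 * Returns true when the packet must be dropped.
 */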
static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
	return false;
}
#endif

static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* Remember the arrival interface for link-local peers */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

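/* Build and transmit a bare ACK or RST on the per-netns control socket,
 * echoing the addresses (and, if requested, timestamps and the MD5
 * option) of the packet being answered.
 */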
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

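/* Answer a segment that hit a TIME_WAIT socket with the ACK the
 * timewait state calls for, then drop the timewait reference.
 */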
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

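/* On a listening socket, match an incoming segment to a pending
 * connection request or an already established child socket.
 */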
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk || nsk == sk)
			reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

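/* Handle an incoming SYN on a listening socket; v4-mapped traffic is
 * delegated to the IPv4 handler.
 */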
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

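/* Create the child socket for a completed handshake, copying socket
 * options and route information from the listener and the request.
 */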
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the
		 * comment in that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shame.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB into its correct location in
	 * TCP_SKB_CB().  It must be done after xfrm6_policy_check(), which
	 * still needs IP6CB.  barrier() makes sure the compiler does not
	 * play aliasing games with the two locations.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

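/* Main IPv6 TCP receive routine: validate the header and checksum,
 * look up the owning socket and either process the segment directly,
 * queue it to the backlog, or answer with a RST.
 */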
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw);
			inet_twsk_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

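/* Early demux: look up an established socket from the wire addresses
 * before routing, so that its cached receive dst can be reused.
 */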
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use a BH (not RCU) protected lookup */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

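/* Create the per-network-namespace control socket used by
 * tcp_v6_send_response() to emit RSTs and ACKs.
 */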
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

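/* Module init/exit: register the protocol handler, the socket type and
 * the per-netns operations, unwinding in reverse order on failure.
 */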
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}