/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
 *                          only, and any IPv4 addresses if not IPv6 only
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *                          and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			if (!sk1_rcv_saddr || !sk2_rcv_saddr)
				return match_wildcard;
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		if (!sk1_rcv_saddr || !sk2_rcv_saddr)
			return match_wildcard;
	}
	return false;
}

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

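/* Added comment: snapshot the ip_local_port_range sysctl values under
 * their seqlock so a concurrent writer cannot hand us a torn low/high
 * pair.
 */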
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

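/* Added comment: walk the owner list of @tb looking for a socket that
 * conflicts with @sk on this port.  relax == false applies the stricter
 * test used while probing for a free port; reuseport_ok == false
 * disregards SO_REUSEPORT when judging conflicts.  Returns nonzero on
 * conflict.
 */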
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  bool relax, bool reuseport_ok)
{
	struct sock *sk2;
	bool reuse = sk->sk_reuse;
	bool reuseport = !!sk->sk_reuseport && reuseport_ok;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    rcu_access_pointer(sk->sk_reuseport_cb) ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
		}
	}
	return sk2 != NULL;
}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket lock held.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int port = 0;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	int i, low, high, attempt_half;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;

	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_get_local_port_range(net, &low, &high);
	high++;	/* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;
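	/* Added comment: keeping remaining even means the += 2 scan below,
	 * together with the "port -= remaining" wraparound, preserves the
	 * parity picked by the initial offset.
	 */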

	offset = prandom_u32() % remaining;
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (!inet_csk_bind_conflict(sk, tb, false, false))
					goto success;
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	return head;
}

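/* Added comment: return 1 if @sk may share @tb via the fast reuseport
 * path, i.e. a SO_REUSEPORT socket with the same uid whose rcv_saddr is
 * known not to conflict with the sockets already bound to this port.
 */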
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true);
}

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int ret = 1, port = snum;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb = NULL;
	kuid_t uid = sock_i_uid(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &port);
		if (!head)
			return ret;
		if (!tb)
			goto tb_not_found;
		goto success;
	}
	head = &hinfo->bhash[inet_bhashfn(net, port,
					  hinfo->bhash_size)];
	spin_lock_bh(&head->lock);
	inet_bind_bucket_for_each(tb, &head->chain)
		if (net_eq(ib_net(tb), net) && tb->port == port)
			goto tb_found;
tb_not_found:
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
				     net, head, port);
	if (!tb)
		goto fail_unlock;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if ((tb->fastreuse > 0 && reuse) ||
		    sk_reuseport_match(tb, sk))
			goto success;
		if (inet_csk_bind_conflict(sk, tb, true, true))
			goto fail_unlock;
	}
success:
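	/* Added comment: on a bucket with no owners yet, cache this
	 * socket's reuse parameters so later bind() calls can take the
	 * fast path above without walking the owner list.
	 */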
	if (hlist_empty(&tb->owners)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Install the three per-connection timers (retransmit, delayed ACK and
 * keepalive) with their protocol-supplied handlers.  Each handler is a
 * timer_list callback and recovers its socket via from_timer().
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to prevail.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
	bool found = false;

	if (sk_hashed(req_to_sk(req))) {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
		spin_unlock(lock);
	}
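	/* Added comment: a pending rsk_timer holds a reference on @req; if
	 * we managed to delete it before it fired, drop that reference here.
	 */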
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct sock *sk_listener = req->rsk_listener;
	struct net *net = sock_net(sk_listener);
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
		goto drop;

	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer(&req->rsk_timer, jiffies + timeo);
		return;
	}
drop:
	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}

static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL);
	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
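	/* Added comment: 2 + 1 references are handed out - one for the
	 * ehash entry, one for the pending rsk_timer, one for the caller.
	 */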
	refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows to force a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, int backlog)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = -EADDRINUSE;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	inet_sk_state_store(sk, TCP_LISTEN);
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		tcp_sk(child)->fastopen_rsk = NULL;
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_head = req;
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(sk, req);
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;
	}
	/* Too bad, another child took ownership of the request, undo. */
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 *	This routine closes sockets which have been at least partially
 *	accepted, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		inet_child_forget(sk, req, child);
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

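/* Added comment: rebuild a routing cache entry for an established socket
 * from its stored flow, honouring a source-routed first hop if one is set.
 */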
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

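	/* Added comment: update_pmtu() may have invalidated the cached
	 * route; re-check and rebuild it if necessary.
	 */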
	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);