#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
        u8 key[TCP_FASTOPEN_KEY_LENGTH];
        struct tcp_fastopen_context *ctxt;

        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
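
        /* No context installed yet: generate a random key and install it.
         * tcp_fastopen_reset_cipher() publishes the new context via RCU,
         * so a concurrent caller racing us here is harmless.
         */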
        get_random_bytes(key, sizeof(key));
        tcp_fastopen_reset_cipher(net, NULL, key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        crypto_free_cipher(ctx->tfm);
        kfree(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
        struct tcp_fastopen_context *ctx;

        ctx = rcu_dereference_protected(
                        inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
        if (ctx)
                call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
        struct tcp_fastopen_context *ctxt;

        spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

        ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
                        lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
        rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
        spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

        if (ctxt)
                call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                              void *key, unsigned int len)
{
        struct tcp_fastopen_context *ctx, *octx;
        struct fastopen_queue *q;
        int err;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

        if (IS_ERR(ctx->tfm)) {
                err = PTR_ERR(ctx->tfm);
error:          kfree(ctx);
                pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
                return err;
        }
        err = crypto_cipher_setkey(ctx->tfm, key, len);
        if (err) {
                pr_err("TCP: TFO cipher key error: %d\n", err);
                crypto_free_cipher(ctx->tfm);
                goto error;
        }
        memcpy(ctx->key, key, len);
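
        /* Publish the new context under the lock: it replaces either the
         * listener's private context (sk != NULL) or the per-netns one.
         * The old context, if any, is freed after an RCU grace period.
         */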
        spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
        if (sk) {
                q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
                octx = rcu_dereference_protected(q->ctx,
                        lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
                rcu_assign_pointer(q->ctx, ctx);
        } else {
                octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
                        lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
                rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
        }
        spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
        return err;
}

static bool __tcp_fastopen_cookie_gen(struct sock *sk, const void *path,
                                      struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;
        bool ok = false;

        rcu_read_lock();

        ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
        if (!ctx)
                ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);

        if (ctx) {
                crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                ok = true;
        }
        rcu_read_unlock();
        return ok;
}
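
/* Generate the Fast Open cookie by encrypting the connection's addresses
 * with the configured AES cipher.  For IPv4 the source and destination
 * addresses are packed into a single zero-padded AES block and encrypted
 * once; for IPv6 the source address is encrypted, the result is XORed with
 * the destination address and encrypted again (a CBC-MAC construction).
 */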
static bool tcp_fastopen_cookie_gen(struct sock *sk,
                                    struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
                return __tcp_fastopen_cookie_gen(sk, path, foc);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);
                struct tcp_fastopen_cookie tmp;

                if (__tcp_fastopen_cookie_gen(sk, &ip6h->saddr, &tmp)) {
                        struct in6_addr *buf = &tmp.addr;
                        int i;

                        for (i = 0; i < 4; i++)
                                buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
                        return __tcp_fastopen_cookie_gen(sk, buf, foc);
                }
        }
#endif
        return false;
}
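
/* If an incoming SYN or SYN-ACK frame contains a payload and/or FIN,
 * queue that data/FIN on the socket's receive queue.
 */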
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
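
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Reset it to 0 before calling tcp_segs_in() to avoid double
         * counting.  tcp_segs_in() also expects skb->len to still include
         * the TCP header, so it must run before __skb_pull().
         */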
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;
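
        /* Account the payload carried in the SYN towards bytes_received. */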
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);
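
        /* Initialize the child socket.  A Fast Open child is created from
         * the bits carried in the SYN alone, so a few fields have to be
         * fixed up here.
         */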
        tp = tcp_sk(child);

        rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;
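
        /* RFC 1323: the window in SYN and SYN-ACK segments is never scaled,
         * so take it as-is.
         */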
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;
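
        /* Activate the retransmit timer so the SYN-ACK can be retransmitted.
         * The request socket is not hashed into the ehash table because it
         * goes straight onto the listener's accept queue.
         */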
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

        refcount_set(&req->rsk_refcnt, 2);
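
        /* Now finish setting up the fastopen child socket. */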
        tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
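        /* tcp_conn_request() will send the SYN-ACK and queue the child
         * into the listener's accept queue.
         */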
        return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;
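
        /* Make sure the listener has enabled Fast Open (max_qlen > 0) and
         * that we do not exceed the maximum number of pending TFO requests,
         * so we don't burn cycles validating cookies we would drop anyway.
         * When the queue is full, try to evict the oldest request on the
         * reset list; if even that one has not timed out yet, refuse TFO.
         */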
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        if (fastopenq->max_qlen == 0)
                return false;

        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}

static bool tcp_fastopen_no_cookie(const struct sock *sk,
                                   const struct dst_entry *dst,
                                   int flag)
{
        return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
               tcp_sk(sk)->fastopen_no_cookie ||
               (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
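
/* Decide whether to accept Fast Open for this SYN.  Returns the newly
 * created child socket if the SYN (its data and/or cookie) is accepted,
 * NULL otherwise.  *foc is updated with the cookie to echo in the SYN-ACK;
 * a length of -1 means no cookie option should be sent.
 */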
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              const struct dst_entry *dst)
{
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;

        if (foc->len == 0)
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (syn_data &&
            tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len >= 0 &&
            tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc) &&
            foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
            foc->len == valid_foc.len &&
            !memcmp(foc->val, valid_foc.val, foc->len)) {
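                /* The cookie is valid (or no cookie is required): create a
                 * full child socket now so the data in the SYN can be
                 * accepted before the SYN-ACK goes out.  If child creation
                 * fails, fall back to acknowledging the ISN only, but still
                 * include the cookie.
                 */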
fastopen:
                child = tcp_fastopen_create_child(sk, skb, req);
                if (child) {
                        foc->len = -1;
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVE);
                        return child;
                }
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        } else if (foc->len > 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        }

        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        const struct dst_entry *dst;

        tcp_fastopen_cache_get(sk, mss, cookie);

        /* Active TFO is temporarily disabled after a suspected blackhole. */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        dst = __sk_dst_get(sk);

        if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
                cookie->len = -1;
                return true;
        }
        return cookie->len > 0;
}
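
/* Check whether the SYN should be deferred until the first write().  We
 * defer when the TCP_FASTOPEN_CONNECT socket option is set and a valid
 * cached cookie is available; otherwise allocate fastopen_req so a cookie
 * request goes out in the SYN.  Returns true if the connect is deferred.
 */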
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_sk(sk)->defer_connect = 1;
                        return true;
                }
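
                /* No usable cookie: allocate fastopen_req so the Fast Open
                 * option (a cookie request) is included in the SYN.
                 */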
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
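
/* Firewalls and other middleboxes may black-hole data sent after a
 * successful TFO handshake.  When such behaviour is detected (repeated SYN
 * timeouts, or an out-of-order FIN on a TFO socket), active TFO is disabled
 * for the whole netns for an exponentially growing period based on the
 * tcp_fastopen_blackhole_timeout sysctl.
 */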
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
        WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

        /* Paired with smp_rmb() in tcp_fastopen_active_should_disable():
         * the stamp must be visible before the counter increment.
         */
        smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);

        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
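
/* Return true while active TFO should stay disabled, i.e. while we are
 * still inside the blackhole back-off period.  Once the period has passed,
 * mark the socket so that a successful connection can reset the counter.
 */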
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
        int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        unsigned long timeout;
        int multiplier;

        if (!tfo_da_times)
                return false;

        /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
        smp_rmb();

        /* Limit the back-off multiplier to at most 2^6 */
        multiplier = 1 << min(tfo_da_times - 1, 6);

        /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable() */
        timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
                  multiplier * tfo_bh_timeout * HZ;
        if (time_before(jiffies, timeout))
                return true;

        /* The disable period is over.  Mark this attempt so that a
         * successful Fast Open can reset tfo_active_disable_times.
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}
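
/* Disable active TFO if FIN is the only packet in the out-of-order queue
 * and no data has been received.  Also reset tfo_active_disable_times if
 * data did arrive on a marked socket whose route is not loopback.
 */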
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst;
        struct sk_buff *skb;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                skb = skb_rb_first(&tp->out_of_order_queue);
                if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                dst = sk_dst_get(sk);
                if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                dst_release(dst);
        }
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
        u32 timeouts = inet_csk(sk)->icsk_retransmits;
        struct tcp_sock *tp = tcp_sk(sk);
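
        /* Broken middleboxes may black-hole Fast Open connections during or
         * even after the handshake.  Be conservative and pause active TFO
         * after repeated SYN timeouts, or once the retransmission deadline
         * has expired, on a socket that attempted Fast Open.
         */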
        if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
            (timeouts == 2 || (timeouts < 2 && expired))) {
                tcp_fastopen_active_disable(sk);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
        }
}