// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Syncookies implementation for the Linux kernel
 *
 *  Copyright (C) 1997 Andi Kleen
 *  Based on ideas by D.J.Bernstein and Eric Schenk.
 */

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <net/secure_seq.h>
#include <net/tcp.h>
#include <net/route.h>

static siphash_key_t syncookie_secret[2] __read_mostly;

#define COOKIEBITS 24	/* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)

/* TCP Timestamp: 6 lowest bits of timestamp sent in the cookie SYN-ACK
 * stores TCP options:
 *
 * MSB                               LSB
 * | 31 ...   6 |  5  |  4   | 3 2 1 0 |
 * |  Timestamp | ECN | SACK | WScale  |
 *
 * When we receive a valid cookie-ACK, we look at the echoed tsval (if
 * any) to figure out which TCP options were requested in the original SYN.
 */
#define TS_OPT_WSCALE_MASK	0xf
#define TS_OPT_SACK		BIT(4)
#define TS_OPT_ECN		BIT(5)
/* There is no TS_OPT_TIMESTAMP:
 * if ACK contains timestamp option, we already know it was
 * requested/supported by the syn/synack exchange.
 */
#define TSBITS	6
#define TSMASK	(((__u32)1 << TSBITS) - 1)
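
/* For example (illustrative values): a SYN that asked for wscale 7,
 * SACK and ECN would be encoded in the low TSBITS as
 *
 *	options = 7 | TS_OPT_SACK | TS_OPT_ECN = 0x37
 *
 * while a SYN without window scaling stores TS_OPT_WSCALE_MASK (0xf)
 * in the wscale field as a "not requested" sentinel (see
 * cookie_init_timestamp() below).
 */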

static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
		       u32 count, int c)
{
	net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
	return siphash_4u32((__force u32)saddr, (__force u32)daddr,
			    (__force u32)sport << 16 | (__force u32)dport,
			    count, &syncookie_secret[c]);
}

/*
 * When syncookies are in effect and tcp timestamps are enabled we encode
 * tcp options in the lower bits of the timestamp value that will be
 * sent in the syn-ack.
 * Since subsequent timestamps use the normal tcp_time_stamp value, we
 * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
 */
u64 cookie_init_timestamp(struct request_sock *req)
{
	struct inet_request_sock *ireq;
	u32 ts, ts_now = tcp_time_stamp_raw();
	u32 options = 0;

	ireq = inet_rsk(req);

	options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
	if (ireq->sack_ok)
		options |= TS_OPT_SACK;
	if (ireq->ecn_ok)
		options |= TS_OPT_ECN;

	ts = ts_now & ~TSMASK;
	ts |= options;
	if (ts > ts_now) {
		ts >>= TSBITS;
		ts--;
		ts <<= TSBITS;
		ts |= options;
	}
	return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
}
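
/* A worked example of the rounding above, with made-up numbers: if
 * ts_now == 0x1000002 and options == 0x37, then ts == 0x1000037 would
 * lie in the future, so the code steps back one TSBITS granule:
 * 0x1000037 >> 6 == 0x40000, minus one and shifted back gives 0xffffc0,
 * and OR-ing the options in yields 0xfffff7 <= ts_now.
 */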

static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
				   __be16 dport, __u32 sseq, __u32 data)
{
	/*
	 * Compute the secure sequence number.
	 * The output should be:
	 *   HASH(sec1,saddr,sport,daddr,dport) + sseq + (count * 2^24)
	 *      + (HASH(sec2,saddr,sport,daddr,dport,count) % 2^24),
	 * where sseq is the peer's initial sequence number and count
	 * increases every minute by one.
	 * "data" (here the MSS table index) must fit in COOKIEBITS.
	 */
	u32 count = tcp_cookie_time();

	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
		sseq + (count << COOKIEBITS) +
		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
		 & COOKIEMASK));
}
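
/* The resulting 32-bit cookie, conceptually (mod 2^32):
 *
 *	cookie = H1 + sseq + (count << 24) + ((H2 + data) & 0xffffff)
 *
 * where H1 and H2 are the two cookie_hash() invocations above.  Only
 * the low COOKIEBITS carry (H2 + data); the minute counter from
 * tcp_cookie_time() rides in the top 8 bits, which is what lets
 * check_tcp_syn_cookie() recover the cookie's age.
 */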

/*
 * This retrieves the small "data" value from the syncookie.
 * If the syncookie is bad, the data returned will be out of
 * range.  This must be checked by the caller.
 *
 * The count value used to generate the cookie must be less than
 * MAX_SYNCOOKIE_AGE minutes in the past.
 * The return value is (__u32)-1 if this test fails.
 */
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
				  __be16 sport, __be16 dport, __u32 sseq)
{
	u32 diff, count = tcp_cookie_time();

	/* Strip away the layers from the cookie */
	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

	/* Cookie is now reduced to (count * 2^24) + (hash % 2^24) */
	diff = (count - (cookie >> COOKIEBITS)) & ((__u32)-1 >> COOKIEBITS);
	if (diff >= MAX_SYNCOOKIE_AGE)
		return (__u32)-1;

	return (cookie -
		cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
		& COOKIEMASK;	/* Leaving the data behind */
}
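
/* Example with illustrative numbers: for a cookie minted at count == 1000
 * and checked when tcp_cookie_time() returns 1001, stripping H1 + sseq
 * leaves 1000 (mod 256) in the top byte, so diff == (1001 - 1000) & 0xff
 * == 1, which is below MAX_SYNCOOKIE_AGE, and the data is unmasked with
 * the original count, count - diff == 1000.
 */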

/*
 * MSS Values are chosen based on the 2011 paper
 * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
 * Values ..
 * .. lower than 536 are rare (< 0.2%)
 * .. between 537 and 1299 account for less than 1.5% of observed values
 * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
 * .. exceeding 1460 are very rare (< 0.04%)
 *
 * 1460 is the single most frequently announced mss value (30 to 46% depending
 * on monitor location).  Values of 1440, 1452, and 1463 account for about 2%
 * each.
 */
static __u16 const msstab[] = {
	536,
	1300,
	1440,
	1460,
};
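
/* The encoder stores only an index into msstab, rounding the peer's MSS
 * down: e.g. an advertised MSS of 1400 scans from the top and stops at
 * msstab[1] == 1300, so the cookie carries data == 1 and the decoded
 * MSS becomes 1300.
 */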

/*
 * Generate a syncookie.  mssp points to the mss, which is returned
 * rounded down to the value encoded in the cookie.
 */
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp)
{
	int mssind;
	const __u16 mss = *mssp;

	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
		if (mss >= msstab[mssind])
			break;
	*mssp = msstab[mssind];

	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
				     th->source, th->dest, ntohl(th->seq),
				     mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);

__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
{
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	return __cookie_v4_init_sequence(iph, th, mssp);
}

/*
 * Check if an ACK sequence number is a valid syncookie.
 * Return the decoded mss if it is, or 0 if not.
 */
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie)
{
	__u32 seq = ntohl(th->seq) - 1;
	__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
					    th->source, th->dest, seq);

	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
EXPORT_SYMBOL_GPL(__cookie_v4_check);

struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *child;
	bool own_req;

	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						 NULL, &own_req);
	if (child) {
		refcount_set(&req->rsk_refcnt, 1);
		tcp_sk(child)->tsoffset = tsoff;
		sock_rps_save_rxhash(child, skb);

		if (rsk_drop_req(req)) {
			reqsk_put(req);
			return child;
		}

		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;

		bh_unlock_sock(child);
		sock_put(child);
	}
	__reqsk_free(req);

	return NULL;
}
EXPORT_SYMBOL(tcp_get_cookie_sock);

/*
 * When syncookies are in effect and tcp timestamps are enabled we stored
 * additional tcp options in the timestamp.
 * This extracts these options from the timestamp echo.
 *
 * Return false if we decode a tcp option that is disabled
 * on the host.
 */
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *tcp_opt)
{
	/* echoed timestamp, lowest bits contain options */
	u32 options = tcp_opt->rcv_tsecr;

	if (!tcp_opt->saw_tstamp) {
		tcp_clear_options(tcp_opt);
		return true;
	}

	if (!net->ipv4.sysctl_tcp_timestamps)
		return false;

	tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;

	if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
		return false;

	if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
		return true; /* 0xf sentinel: the SYN did not request window scaling */

	tcp_opt->wscale_ok = 1;
	tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;

	return net->ipv4.sysctl_tcp_window_scaling != 0;
}
EXPORT_SYMBOL(cookie_timestamp_decode);
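
/* A worked decode, mirroring the encoder example above: an echoed tsecr
 * whose low bits are 0x37 yields sack_ok (bit 4), wscale_ok with
 * snd_wscale == 7, and leaves bit 5 for cookie_ecn_ok() below; a wscale
 * field of 0xf would instead mean "no window scaling" and the function
 * returns true before setting wscale_ok.
 */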

bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
		   const struct net *net, const struct dst_entry *dst)
{
	bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN;

	if (!ecn_ok)
		return false;

	if (net->ipv4.sysctl_tcp_ecn)
		return true;

	return dst_feature(dst, RTAX_FEATURE_ECN);
}
EXPORT_SYMBOL(cookie_ecn_ok);

struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk,
					    struct sk_buff *skb)
{
	struct request_sock *req;

#ifdef CONFIG_MPTCP
	struct tcp_request_sock *treq;

	if (sk_is_mptcp(sk))
		ops = &mptcp_subflow_request_sock_ops;
#endif

	req = inet_reqsk_alloc(ops, sk, false);
	if (!req)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP)
	treq = tcp_rsk(req);
	treq->is_mptcp = sk_is_mptcp(sk);
	if (treq->is_mptcp) {
		int err = mptcp_subflow_init_cookie_req(req, sk, skb);

		if (err) {
			reqsk_free(req);
			return NULL;
		}
	}
#endif

	return req;
}
EXPORT_SYMBOL_GPL(cookie_tcp_reqsk_alloc);

/* On input, sk is a listener.
 * Output is the listener if the incoming packet would not create a child,
 * the new child socket on success, or NULL if memory could not be allocated.
 */
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
{
	struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct tcp_options_received tcp_opt;
	struct inet_request_sock *ireq;
	struct tcp_request_sock *treq;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 cookie = ntohl(th->ack_seq) - 1;
	struct sock *ret = sk;
	struct request_sock *req;
	int full_space, mss;
	struct rtable *rt;
	__u8 rcv_wscale;
	struct flowi4 fl4;
	u32 tsoff = 0;

	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
		goto out;

	if (tcp_synq_no_recent_overflow(sk))
		goto out;

	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
	if (mss == 0) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
		goto out;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

	/* check for timestamp cookie support */
	memset(&tcp_opt, 0, sizeof(tcp_opt));
	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);

	if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
		tsoff = secure_tcp_ts_off(sock_net(sk),
					  ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr);
		tcp_opt.rcv_tsecr -= tsoff;
	}

	if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
		goto out;

	ret = NULL;
	req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
	if (!req)
		goto out;

	ireq = inet_rsk(req);
	treq = tcp_rsk(req);
	treq->rcv_isn = ntohl(th->seq) - 1;
	treq->snt_isn = cookie;
	treq->ts_off = 0;
	treq->txhash = net_tx_rndhash();
	req->mss = mss;
	ireq->ir_num = ntohs(th->dest);
	ireq->ir_rmt_port = th->source;
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->ir_mark = inet_request_mark(sk, skb);
	ireq->snd_wscale = tcp_opt.snd_wscale;
	ireq->sack_ok = tcp_opt.sack_ok;
	ireq->wscale_ok = tcp_opt.wscale_ok;
	ireq->tstamp_ok = tcp_opt.saw_tstamp;
	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
	treq->snt_synack = 0;
	treq->tfo_listener = false;

	if (IS_ENABLED(CONFIG_SMC))
		ireq->smc_ok = 0;

	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);

	/* We threw the options of the initial SYN away, so we hope
	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
	 */
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));

	if (security_inet_conn_request(sk, skb, req)) {
		reqsk_free(req);
		goto out;
	}

	req->num_retrans = 0;

	/*
	 * We need to look up the route here to get at the correct
	 * window size.  Ideally we would make sure that the window
	 * size hasn't changed since we received the original SYN,
	 * but there is no easy way to do that.
	 */
	flowi4_init_output(&fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
			   inet_sk_flowi_flags(sk),
			   opt->srr ? opt->faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt)) {
		reqsk_free(req);
		goto out;
	}

	/* Try to redo what tcp_v4_send_synack did. */
	req->rsk_window_clamp = tp->window_clamp ?: dst_metric(&rt->dst, RTAX_WINDOW);

	full_space = tcp_full_space(sk);
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	tcp_select_initial_window(sk, full_space, req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(&rt->dst, RTAX_INITRWND));

	ireq->rcv_wscale = rcv_wscale;
	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);

	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
	/* ip_queue_xmit() depends on our flow being set up;
	 * normal sockets get it right from inet_csk_route_child_sock().
	 */
	if (ret)
		inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:	return ret;
}