1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#ifndef _LINUX_TCP_H
18#define _LINUX_TCP_H
19
20
21#include <linux/skbuff.h>
22#include <linux/win_minmax.h>
23#include <net/sock.h>
24#include <net/inet_connection_sock.h>
25#include <net/inet_timewait_sock.h>
26#include <uapi/linux/tcp.h>
27
/* Return the TCP header of @skb, i.e. its transport header reinterpreted
 * as a struct tcphdr. Caller must ensure the transport header is set.
 */
static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	void *th = (void *)skb_transport_header(skb);

	return th;
}
32
33static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
34{
35 return th->doff * 4;
36}
37
/* TCP header length in bytes for the outer header of @skb. */
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	return __tcp_hdrlen(th);
}
42
/* Return the inner (encapsulated) TCP header of @skb. Only meaningful
 * when the inner transport header offset has been set up.
 */
static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	void *th = (void *)skb_inner_transport_header(skb);

	return th;
}
47
/* Length in bytes of the inner (encapsulated) TCP header of @skb. */
static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
	return __tcp_hdrlen(inner_tcp_hdr(skb));
}
52
53static inline unsigned int tcp_optlen(const struct sk_buff *skb)
54{
55 return (tcp_hdr(skb)->doff - 5) * 4;
56}
57
58
/* TCP Fast Open cookie sizes in bytes (see RFC 7413): cookies must be
 * between MIN and MAX bytes; SIZE is the locally generated length.
 */
#define TCP_FASTOPEN_COOKIE_MIN 4
#define TCP_FASTOPEN_COOKIE_MAX 16
#define TCP_FASTOPEN_COOKIE_SIZE 8
62
63
/* TCP Fast Open cookie as exchanged with the peer. */
struct tcp_fastopen_cookie {
	union {
		u8 val[TCP_FASTOPEN_COOKIE_MAX];
#if IS_ENABLED(CONFIG_IPV6)
		/* Overlays val[] so the cookie bytes can be addressed
		 * as an IPv6 address.
		 */
		struct in6_addr addr;
#endif
	};
	s8 len;		/* cookie length in bytes; signed — presumably a
			 * negative value marks "no cookie", TODO confirm
			 * against tcp_fastopen users */
	bool exp;	/* cookie came in the experimental option format? —
			 * NOTE(review): verify against option parsing */
};
74
75
/* One SACK block as carried on the wire (network byte order). */
struct tcp_sack_block_wire {
	__be32 start_seq;
	__be32 end_seq;
};

/* One SACK block in host byte order. */
struct tcp_sack_block {
	u32 start_seq;
	u32 end_seq;
};
85
86
/* Flag bits stored in tcp_options_received.sack_ok (a 4-bit field). */
#define TCP_SACK_SEEN (1 << 0)	/* peer advertised SACK on SYN */
#define TCP_FACK_ENABLED (1 << 1)	/* FACK heuristics enabled — presumably; confirm */
#define TCP_DSACK_SEEN (1 << 2)	/* a DSACK block was seen — TODO confirm */
90
/* Parsed TCP options, usually from the most recent packet
 * (some fields are only set from SYN packets).
 */
struct tcp_options_received {
/*	PAWS/RTTM data	*/
	long ts_recent_stamp;	/* Time we stored ts_recent (for aging) */
	u32 ts_recent;		/* Time stamp to echo next		*/
	u32 rcv_tsval;		/* Time stamp value			*/
	u32 rcv_tsecr;		/* Time stamp echo reply		*/
	u16 saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
		dsack : 1,	/* D-SACK is scheduled			*/
		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
		sack_ok : 4,	/* SACK seen on SYN packet		*/
		snd_wscale : 4,	/* Window scaling received from sender	*/
		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
	u8 num_sacks;		/* Number of SACK blocks		*/
	u16 user_mss;		/* mss requested by user in ioctl	*/
	u16 mss_clamp;		/* Maximal mss, negotiated at connection setup */
};
108
109static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
110{
111 rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
112 rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
113}
114
115
116
117
118
/* Maximum number of SACK blocks tracked / sent in TCP options. */
#define TCP_NUM_SACKS 4

struct tcp_request_sock_ops;
122
/* TCP-specific request sock (embryonic connection state during the
 * 3-way handshake). Embeds inet_request_sock first so tcp_rsk() can
 * cast a request_sock pointer directly.
 */
struct tcp_request_sock {
	struct inet_request_sock req;
	const struct tcp_request_sock_ops *af_specific;
	struct skb_mstamp snt_synack;	/* timestamp of (first) SYNACK sent —
					 * presumably for RTT sampling; confirm */
	bool tfo_listener;		/* created by a Fast Open listener? */
	u32 txhash;
	u32 rcv_isn;			/* peer's initial sequence number */
	u32 snt_isn;			/* our initial sequence number */
	u32 ts_off;			/* timestamp offset */
	u32 last_oow_ack_time;		/* last time an out-of-window ACK
					 * was sent (rate limiting) */
	u32 rcv_nxt;			/* next expected sequence; for Fast
					 * Open this accounts for data
					 * carried in the SYN — TODO confirm
					 */
};
138
/* Downcast a request_sock to its TCP-specific container; valid because
 * tcp_request_sock embeds inet_request_sock as its first member.
 */
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	void *rsk = (void *)req;

	return rsk;
}
143
/* Full TCP connection state. Embeds inet_connection_sock, so tcp_sk()
 * can cast a struct sock pointer directly to this type.
 */
struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock inet_conn;
	u16 tcp_header_len;	/* Bytes of tcp header to send		*/
	u16 gso_segs;		/* Max number of segs per GSO packet	*/

	/*
	 * Header prediction flags: precomputed expected header pattern
	 * for the receive fast path — NOTE(review): exact encoding is
	 * defined by the fast-path code in tcp_input.c; confirm there.
	 */
	__be32 pred_flags;

	/*
	 * RFC793 variables by their proper names, so the code can be
	 * read side by side with the spec (which writes them in capitals).
	 */
	u64 bytes_received;	/* RFC4898: sum(delta(rcv_nxt)), i.e. total
				 * bytes received */
	u32 segs_in;		/* RFC4898: total number of segments in */
	u32 data_segs_in;	/* RFC4898: total number of data segments in */
	u32 rcv_nxt;		/* What we want to receive next		*/
	u32 copied_seq;		/* Head of yet unread data		*/
	u32 rcv_wup;		/* rcv_nxt on last window update sent	*/
	u32 snd_nxt;		/* Next sequence we send		*/
	u32 segs_out;		/* RFC4898: total number of segments sent */
	u32 data_segs_out;	/* RFC4898: total number of data segments
				 * sent */
	u64 bytes_acked;	/* RFC4898: sum(delta(snd_una)), i.e. total
				 * bytes acked */
	u32 snd_una;		/* First byte we want an ack for	*/
	u32 snd_sml;		/* Last byte of the most recently transmitted
				 * small packet (Nagle) */
	u32 rcv_tstamp;		/* timestamp of last received ACK
				 * (for keepalives) */
	u32 lsndtime;		/* timestamp of last sent data packet
				 * (for restart window) */
	u32 last_oow_ack_time;	/* timestamp of last out-of-window ACK */

	u32 tsoffset;		/* timestamp offset */

	struct list_head tsq_node; /* anchor in TSQ tasklet list —
				    * presumably; confirm in tcp_output.c */

	/* Data for direct copy to user space */
	struct {
		struct sk_buff_head prequeue;
		struct task_struct *task;
		struct msghdr *msg;
		int memory;
		int len;
	} ucopy;

	u32 snd_wl1;		/* Sequence for window update		*/
	u32 snd_wnd;		/* The window we expect to receive	*/
	u32 max_window;		/* Maximal window ever seen from peer	*/
	u32 mss_cache;		/* Cached effective mss, not including SACKs */

	u32 window_clamp;	/* Maximal window to advertise		*/
	u32 rcv_ssthresh;	/* Current window clamp			*/

	/* RACK state: info about the most recently (s)acked skb */
	struct tcp_rack {
		struct skb_mstamp mstamp; /* (re)sent time of the skb */
		u32 rtt_us;	/* associated RTT */
		u32 end_seq;	/* ending TCP sequence of the skb */
		u8 advanced;	/* mstamp advanced since last lost marking —
				 * TODO confirm against tcp_recovery.c */
		u8 reord;	/* reordering detected */
	} rack;
	u16 advmss;		/* Advertised MSS			*/
	u32 chrono_start;	/* Start time of current TCP chronograph */
	u32 chrono_stat[3];	/* Accumulated time per chrono type */
	u8 chrono_type:2,	/* current chronograph type */
		rate_app_limited:1,  /* rate sample was app-limited */
		fastopen_connect:1, /* TCP_FASTOPEN_CONNECT sockopt */
		unused:4;
	u8 nonagle : 4,		/* Disable Nagle algorithm?		*/
		thin_lto : 1,	/* Use linear timeouts for thin streams */
		unused1 : 1,
		repair : 1,	/* TCP_REPAIR mode active */
		frto : 1;	/* F-RTO (RFC5682) activated */
	u8 repair_queue;
	u8 syn_data:1,		/* SYN includes data */
		syn_fastopen:1,	/* SYN includes Fast Open option */
		syn_fastopen_exp:1, /* SYN includes Fast Open exp. option */
		syn_fastopen_ch:1, /* Active TFO re-enabling attempt */
		syn_data_acked:1, /* data in SYN is acked by SYN-ACK */
		save_syn:1,	/* Save headers of SYN packet */
		is_cwnd_limited:1; /* forward progress limited by snd_cwnd? */
	u32 tlp_high_seq;	/* snd_nxt at the time of TLP retransmit */

	/* RTT measurement */
	struct skb_mstamp tcp_mstamp; /* most recent packet received/sent —
				       * presumably; confirm call sites */
	u32 srtt_us;		/* smoothed round trip time (scaled) in usecs */
	u32 mdev_us;		/* medium deviation			*/
	u32 mdev_max_us;	/* maximal mdev for the last rtt period	*/
	u32 rttvar_us;		/* smoothed mdev_max			*/
	u32 rtt_seq;		/* sequence number to update rttvar	*/
	struct minmax rtt_min;	/* windowed min RTT filter */

	u32 packets_out;	/* Packets which are "in flight"	*/
	u32 retrans_out;	/* Retransmitted packets out		*/
	u32 max_packets_out;	/* max packets_out in last window	*/
	u32 max_packets_seq;	/* right edge of max_packets_out flight	*/

	u16 urg_data;		/* Saved octet of OOB data and control flags */
	u8 ecn_flags;		/* ECN status bits			*/
	u8 keepalive_probes;	/* number of allowed keepalive probes	*/
	u32 reordering;		/* Packet reordering metric		*/
	u32 snd_up;		/* Urgent pointer			*/

	/*
	 * Options received (usually on the last packet, some only on
	 * SYN packets).
	 */
	struct tcp_options_received rx_opt;

	/*
	 * Slow start and congestion control (see also Nagle, and
	 * Karn & Partridge).
	 */
	u32 snd_ssthresh;	/* Slow start size threshold		*/
	u32 snd_cwnd;		/* Sending congestion window		*/
	u32 snd_cwnd_cnt;	/* Linear increase counter		*/
	u32 snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	u32 snd_cwnd_used;
	u32 snd_cwnd_stamp;
	u32 prior_cwnd;		/* cwnd saved at start of Recovery	*/
	u32 prr_delivered;	/* packets newly delivered to receiver
				 * during Recovery (PRR) */
	u32 prr_out;		/* total packets sent during Recovery	*/
	u32 delivered;		/* total data packets delivered, incl. rexmits */
	u32 lost;		/* total data packets lost, incl. rexmits */
	u32 app_limited;	/* limited until "delivered" reaches this value */
	struct skb_mstamp first_tx_mstamp;  /* start of window send phase */
	struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
	u32 rate_delivered;	/* saved rate sample: packets delivered	*/
	u32 rate_interval_us;	/* saved rate sample: time elapsed	*/

	u32 rcv_wnd;		/* Current receiver window		*/
	u32 write_seq;		/* Tail(+1) of data held in tcp send buffer */
	u32 notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u32 pushed_seq;		/* Last pushed seq			*/
	u32 lost_out;		/* Lost packets				*/
	u32 sacked_out;		/* SACK'd packets			*/
	u32 fackets_out;	/* FACK'd packets			*/

	/* Retransmit queue hints */
	struct sk_buff* lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	/* Out-of-order segments, kept in an rbtree. */
	struct rb_root out_of_order_queue;
	struct sk_buff *ooo_last_skb;	/* cache of last OOO skb —
					 * presumably rb_last(); confirm */

	/* SACK data; ordering of the next two arrays likely matters to
	 * the option-writing code — NOTE(review): confirm before moving.
	 */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* The SACKs themselves */

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;	/* skb just after the highest
					 * SACKed skb — presumably only
					 * valid when sacked_out > 0 */

	int lost_cnt_hint;

	u32 prior_ssthresh;	/* ssthresh saved at recovery start	*/
	u32 high_seq;		/* snd_nxt at onset of congestion	*/

	u32 retrans_stamp;	/* Timestamp of the last retransmit	*/

	/* Loss recovery / undo state */
	u32 undo_marker;	/* snd_una upon a new recovery episode	*/
	int undo_retrans;	/* number of undoable retransmissions	*/
	u32 total_retrans;	/* Total retransmits for entire connection */

	u32 urg_seq;		/* Seq of received urgent pointer	*/
	unsigned int keepalive_time;	/* time before keepalive takes place */
	unsigned int keepalive_intvl;	/* interval between keepalive probes */

	int linger2;		/* TCP_LINGER2 setting */

	/* Receiver-side RTT estimation */
	struct {
		u32 rtt_us;
		u32 seq;
		struct skb_mstamp time;
	} rcv_rtt_est;

	/* Receiver queue space */
	struct {
		int space;
		u32 seq;
		struct skb_mstamp time;
	} rcvq_space;

	/* TCP-specific MTU probe information */
	struct {
		u32 probe_seq_start;
		u32 probe_seq_end;
	} mtu_probe;
	u32 mtu_info;		/* MTU hint from an ICMP "frag needed" /
				 * "packet too big" received while the
				 * socket was owned by user — TODO confirm */

#ifdef CONFIG_TCP_MD5SIG
	/* AF-specific operations; so far only used by MD5 signature support */
	const struct tcp_sock_af_ops *af_specific;

	/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info __rcu *md5sig_info;
#endif

	/* TCP Fast Open state */
	struct tcp_fastopen_request *fastopen_req;
	/* fastopen_rsk points to the request_sock that resulted in this
	 * full socket (used e.g. to retransmit SYNACKs).
	 */
	struct request_sock *fastopen_rsk;
	u32 *saved_syn;		/* headers of the SYN, kept when save_syn set */
};
373
/* Bit numbers for deferred-work flags — presumably stored on the sock
 * and replayed on release; confirm against tcp_release_cb().
 */
enum tsq_enum {
	TSQ_THROTTLED,		/* throttled by TCP Small Queues */
	TSQ_QUEUED,		/* queued on the TSQ list */
	TCP_TSQ_DEFERRED,	/* TSQ work deferred: socket was owned */
	TCP_WRITE_TIMER_DEFERRED,  /* write timer fired while socket owned */
	TCP_DELACK_TIMER_DEFERRED, /* delack timer fired while socket owned */
	TCP_MTU_REDUCED_DEFERRED,  /* MTU reduction handling deferred */
};
384
/* Mask values corresponding one-to-one with the tsq_enum bit numbers. */
enum tsq_flags {
	TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
	TSQF_QUEUED = (1UL << TSQ_QUEUED),
	TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
	TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
	TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
	TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
};
393
/* Downcast a struct sock to its TCP socket container; valid because
 * tcp_sock embeds inet_connection_sock (and thus struct sock) first.
 */
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	void *tp = (void *)sk;

	return tp;
}
398
/* TCP-specific extension of the TIME-WAIT control block. */
struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
/* rcv/snd sequence numbers live in the shared sock-common area. */
#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
	u32 tw_rcv_wnd;		/* receive window at entry to TIME-WAIT —
				 * presumably; confirm in tcp_minisocks.c */
	u32 tw_ts_offset;	/* timestamp offset */
	u32 tw_ts_recent;	/* timestamp to echo (PAWS) */

	/* Rate-limit out-of-window ACKs: */
	u32 tw_last_oow_ack_time;

	long tw_ts_recent_stamp; /* time tw_ts_recent was stored (aging) */
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *tw_md5_key;
#endif
};
415
/* Downcast a struct sock in TIME-WAIT state to its TCP-specific
 * timewait container.
 */
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	void *tw = (void *)sk;

	return tw;
}
420
421static inline bool tcp_passive_fastopen(const struct sock *sk)
422{
423 return (sk->sk_state == TCP_SYN_RECV &&
424 tcp_sk(sk)->fastopen_rsk != NULL);
425}
426
427static inline void fastopen_queue_tune(struct sock *sk, int backlog)
428{
429 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
430 int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
431
432 queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
433}
434
435static inline void tcp_move_syn(struct tcp_sock *tp,
436 struct request_sock *req)
437{
438 tp->saved_syn = req->saved_syn;
439 req->saved_syn = NULL;
440}
441
442static inline void tcp_saved_syn_free(struct tcp_sock *tp)
443{
444 kfree(tp->saved_syn);
445 tp->saved_syn = NULL;
446}
447
/* Build an skb of TCP stats for timestamping opts (implemented elsewhere). */
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
449
450static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
451{
452
453
454
455 u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
456
457 return (user_mss && user_mss < mss) ? user_mss : mss;
458}
459#endif
460