1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#ifndef _LINUX_TCP_H
18#define _LINUX_TCP_H
19
20
21#include <linux/skbuff.h>
22#include <net/sock.h>
23#include <net/inet_connection_sock.h>
24#include <net/inet_timewait_sock.h>
25#include <uapi/linux/tcp.h>
26
/* Return the TCP header of @skb, located at its transport-header offset. */
static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_transport_header(skb);

	return (struct tcphdr *)th;
}
31
32static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
33{
34 return tcp_hdr(skb)->doff * 4;
35}
36
/* Return the inner (encapsulated) TCP header of a tunneled @skb. */
static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_inner_transport_header(skb);

	return (struct tcphdr *)th;
}
41
42static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
43{
44 return inner_tcp_hdr(skb)->doff * 4;
45}
46
47static inline unsigned int tcp_optlen(const struct sk_buff *skb)
48{
49 return (tcp_hdr(skb)->doff - 5) * 4;
50}
51
52
/* TCP Fast Open (RFC 7413) cookie size bounds, in bytes. */
#define TCP_FASTOPEN_COOKIE_MIN	4	/* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX	16	/* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE 8	/* the size employed by this impl. */

/* TCP Fast Open cookie as stored in memory. */
struct tcp_fastopen_cookie {
	s8	len;	/* cookie length; negative presumably means "not cached"
			 * — TODO confirm against users of this struct */
	u8	val[TCP_FASTOPEN_COOKIE_MAX];	/* cookie bytes */
	bool	exp;	/* cookie carried in the RFC 6994 experimental
			 * option format — NOTE(review): inferred from the
			 * field name; confirm in option-writing code */
};
63
64
/* A selective acknowledgment (SACK) block in on-the-wire (big-endian) form. */
struct tcp_sack_block_wire {
	__be32	start_seq;	/* first sequence number covered */
	__be32	end_seq;	/* sequence number after the last one covered */
};

/* A SACK block in host byte order. */
struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};
74
75
/* Flag bits kept in tcp_options_received.sack_ok (it is 4 bits wide). */
#define TCP_SACK_SEEN     (1 << 0)   /* peer is SACK capable */
#define TCP_FACK_ENABLED  (1 << 1)   /* FACK is enabled locally */
#define TCP_DSACK_SEEN    (1 << 2)   /* a D-SACK was received from the peer */

/* TCP options parsed from a received segment. */
struct tcp_options_received {
/*	PAWS/RTTM data	*/
	long	ts_recent_stamp;/* time ts_recent was stored (for aging) */
	u32	ts_recent;	/* timestamp to echo next */
	u32	rcv_tsval;	/* received timestamp value */
	u32	rcv_tsecr;	/* received timestamp echo reply */
	u16	saw_tstamp : 1,	/* TIMESTAMP seen on last packet */
		tstamp_ok : 1,	/* TIMESTAMP negotiated on SYN */
		dsack : 1,	/* D-SACK is scheduled */
		wscale_ok : 1,	/* window scaling seen on SYN */
		sack_ok : 4,	/* TCP_*_SEEN / TCP_FACK_ENABLED bits above */
		snd_wscale : 4,	/* window scale received from sender */
		rcv_wscale : 4;	/* window scale to send to receiver */
	u8	num_sacks;	/* number of SACK blocks */
	u16	user_mss;	/* MSS requested by user */
	u16	mss_clamp;	/* maximal MSS, negotiated at connection setup */
};
97
98static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
99{
100 rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
101 rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
102}
103
104
105
106
107
/* Max number of SACK blocks carried in one TCP option. */
#define TCP_NUM_SACKS 4

struct tcp_request_sock_ops;

/* TCP-specific state of an embryonic (not yet accepted) connection. */
struct tcp_request_sock {
	struct inet_request_sock	req;	/* must stay first: tcp_rsk() casts */
	const struct tcp_request_sock_ops *af_specific;	/* per-address-family ops */
	bool				tfo_listener;	/* created via TCP Fast Open */
	u32				rcv_isn;	/* peer's initial sequence number */
	u32				snt_isn;	/* our initial sequence number */
	u32				snt_synack;	/* SYN-ACK send timestamp —
							 * presumably for RTT; confirm */
	u32				last_oow_ack_time; /* time of last out-of-window ACK */
	u32				rcv_nxt;	/* next sequence expected;
							 * for Fast Open presumably the
							 * seq after data-in-SYN — confirm */
};
125
/*
 * Downcast a request_sock to its containing tcp_request_sock.
 * Valid because struct inet_request_sock is the first member.
 */
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	struct tcp_request_sock *treq = (struct tcp_request_sock *)req;

	return treq;
}
130
/* Full TCP connection state.  Obtained from a struct sock via tcp_sk(). */
struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock
	 * so that tcp_sk() can cast directly. */
	struct inet_connection_sock	inet_conn;
	u16	tcp_header_len;	/* bytes of TCP header to send */
	u16	gso_segs;	/* max number of segs per GSO packet */

	/* Header prediction flags (pre-computed for the fast path). */
	__be32	pred_flags;

/*
 *	RFC 793 variables by their proper names, so the code can be
 *	read side by side with the spec.
 */
	u64	bytes_received;	/* sum of bytes received (see rcv_nxt updates) */

	u32	rcv_nxt;	/* what we want to receive next */
	u32	copied_seq;	/* head of yet-unread data */
	u32	rcv_wup;	/* rcv_nxt on last window update sent */
	u32	snd_nxt;	/* next sequence we send */

	u64	bytes_acked;	/* total bytes acked by the peer */

	/* protects the 64-bit byte counters above on 32-bit arches */
	struct u64_stats_sync syncp;

	u32	snd_una;	/* first byte we want an ack for */
	u32	snd_sml;	/* last byte of most recently sent small packet */
	u32	rcv_tstamp;	/* timestamp of last received ACK (keepalives) */
	u32	lsndtime;	/* timestamp of last sent data packet */
	u32	last_oow_ack_time; /* timestamp of last out-of-window ACK */

	u32	tsoffset;	/* timestamp offset */

	struct list_head tsq_node; /* anchor in TCP small-queues tasklet list */
	unsigned long tsq_flags;   /* see enum tsq_flags below */

	/* Data for direct copy to user. */
	struct {
		struct sk_buff_head prequeue;
		struct task_struct *task;
		struct msghdr *msg;
		int memory;
		int len;
	} ucopy;

	u32	snd_wl1;	/* sequence for window update */
	u32	snd_wnd;	/* the window we expect to receive */
	u32	max_window;	/* maximal window ever seen from peer */
	u32	mss_cache;	/* cached effective MSS, not including SACKs */

	u32	window_clamp;	/* maximal window to advertise */
	u32	rcv_ssthresh;	/* current window clamp */

	u16	advmss;		/* advertised MSS */
	u8	unused;
	u8	nonagle : 4,	/* disable Nagle algorithm? */
		thin_lto : 1,	/* use linear timeouts for thin streams */
		thin_dupack : 1,/* fast retransmit on first dupack */
		repair : 1,	/* socket is in repair mode */
		frto : 1;	/* F-RTO (RFC 5682) activated */
	u8	repair_queue;
	u8	do_early_retrans:1,/* enable RFC 5827 early retransmit */
		syn_data:1,	/* SYN included data */
		syn_fastopen:1,	/* SYN included Fast Open option */
		syn_fastopen_exp:1,/* SYN included experimental Fast Open option */
		syn_data_acked:1,/* data in SYN was acked by SYN-ACK */
		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
	u32	tlp_high_seq;	/* snd_nxt at time of TLP retransmit */

/* RTT measurement */
	u32	srtt_us;	/* smoothed round-trip time, usec units —
				 * presumably shifted; confirm in estimator */
	u32	mdev_us;	/* medium deviation */
	u32	mdev_max_us;	/* maximal mdev for the last RTT period */
	u32	rttvar_us;	/* smoothed mdev_max */
	u32	rtt_seq;	/* sequence number to update rttvar */

	u32	packets_out;	/* packets which are "in flight" */
	u32	retrans_out;	/* retransmitted packets out */
	u32	max_packets_out;/* max packets_out in last window */
	u32	max_packets_seq;/* right edge of max_packets_out flight */

	u16	urg_data;	/* saved octet of OOB data and control flags */
	u8	ecn_flags;	/* ECN status bits */
	u8	keepalive_probes; /* number of allowed keepalive probes */
	u32	reordering;	/* packet reordering metric */
	u32	snd_up;		/* urgent pointer */

/*
 *	Options received (usually on the last packet, some only on SYNs).
 */
	struct tcp_options_received rx_opt;

/*
 *	Slow start and congestion control.
 */
	u32	snd_ssthresh;	/* slow-start size threshold */
	u32	snd_cwnd;	/* sending congestion window */
	u32	snd_cwnd_cnt;	/* linear increase counter */
	u32	snd_cwnd_clamp;	/* do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
	u32	prior_cwnd;	/* congestion window at start of recovery */
	u32	prr_delivered;	/* packets newly delivered to receiver
				 * during recovery (PRR) */
	u32	prr_out;	/* total packets sent during recovery (PRR) */

	u32	rcv_wnd;	/* current receiver window */
	u32	write_seq;	/* tail(+1) of data held in send buffer */
	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u32	pushed_seq;	/* last pushed sequence */
	u32	lost_out;	/* lost packets */
	u32	sacked_out;	/* SACKed packets */
	u32	fackets_out;	/* FACKed packets */

	/* Retransmit-queue hints. */
	struct sk_buff* lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	/* Out-of-order segments go in this queue. */
	struct sk_buff_head out_of_order_queue;

	/* SACK data; duplicate_sack and selective_acks are used together
	 * when writing the SACK option. */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* the SACKs themselves */

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;	/* skb just after the highest
					 * SACKed skb — presumably only valid
					 * while sacked_out > 0; confirm */

	int lost_cnt_hint;
	u32	retransmit_high;	/* lost marks valid up to this seqno */

	u32	lost_retrans_low;	/* lowest seq sent after any rxmit */

	u32	prior_ssthresh;	/* ssthresh saved at recovery start */
	u32	high_seq;	/* snd_nxt at onset of congestion */

	u32	retrans_stamp;	/* timestamp of the last retransmit */

	u32	undo_marker;	/* snd_una upon a new recovery episode */
	int	undo_retrans;	/* number of undoable retransmissions */
	u32	total_retrans;	/* total retransmits for entire connection */

	u32	urg_seq;	/* seq of received urgent pointer */
	unsigned int keepalive_time;	/* time before keepalive starts */
	unsigned int keepalive_intvl;	/* interval between keepalive probes */

	int	linger2;

	/* Receiver-side RTT estimation. */
	struct {
		u32	rtt;
		u32	seq;
		u32	time;
	} rcv_rtt_est;

	/* Receiver queue-space tuning. */
	struct {
		int	space;
		u32	seq;
		u32	time;
	} rcvq_space;

	/* TCP-specific MTU probe information. */
	struct {
		u32	probe_seq_start;
		u32	probe_seq_end;
	} mtu_probe;
	u32	mtu_info;	/* MTU from an ICMP frag-needed received while
				 * the socket was owned by user — TODO confirm */

/* TCP AF-specific parts; only used by MD5 signature support so far. */
#ifdef CONFIG_TCP_MD5SIG

	const struct tcp_sock_af_ops *af_specific;

	/* TCP MD5 Signature Option information. */
	struct tcp_md5sig_info __rcu *md5sig_info;
#endif

	/* TCP Fast Open state. */
	struct tcp_fastopen_request *fastopen_req;

	/* fastopen_rsk points to the request_sock that resulted in this
	 * full socket (see tcp_passive_fastopen() below). */
	struct request_sock *fastopen_rsk;
};
332
/*
 * Bit numbers for tcp_sock.tsq_flags.  The *_DEFERRED bits mark work
 * postponed — presumably because the socket was owned by the user at the
 * time; confirm against the handlers that test these bits.
 */
enum tsq_flags {
	TSQ_THROTTLED,			/* output throttled by small queues */
	TSQ_QUEUED,			/* socket queued on the TSQ list */
	TCP_TSQ_DEFERRED,		/* TSQ handling deferred */
	TCP_WRITE_TIMER_DEFERRED,	/* write-timer handling deferred */
	TCP_DELACK_TIMER_DEFERRED,	/* delayed-ACK timer handling deferred */
	TCP_MTU_REDUCED_DEFERRED,	/* MTU-reduction handling deferred */
};
343
/*
 * Downcast a sock to its containing tcp_sock.  Valid because
 * struct inet_connection_sock is the first member of tcp_sock.
 */
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	struct tcp_sock *tp = (struct tcp_sock *)sk;

	return tp;
}
348
/* Minimal TCP state kept for a connection in TIME-WAIT. */
struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;	/* must stay first: tcp_twsk() casts */
	u32	tw_rcv_nxt;		/* next sequence expected */
	u32	tw_snd_nxt;		/* next sequence we would send */
	u32	tw_rcv_wnd;		/* receive window */
	u32	tw_ts_offset;		/* timestamp offset */
	u32	tw_ts_recent;		/* timestamp to echo (PAWS) */

	/* Time of the last out-of-window ACK we sent. */
	u32	tw_last_oow_ack_time;

	long	tw_ts_recent_stamp;	/* when tw_ts_recent was stored */
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *tw_md5_key;	/* MD5 key, if negotiated */
#endif
};
365
/*
 * Downcast a sock to its containing tcp_timewait_sock.  Valid because
 * struct inet_timewait_sock is the first member.
 */
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	struct tcp_timewait_sock *tw = (struct tcp_timewait_sock *)sk;

	return tw;
}
370
371static inline bool tcp_passive_fastopen(const struct sock *sk)
372{
373 return (sk->sk_state == TCP_SYN_RECV &&
374 tcp_sk(sk)->fastopen_rsk != NULL);
375}
376
377extern void tcp_sock_destruct(struct sock *sk);
378
379static inline int fastopen_init_queue(struct sock *sk, int backlog)
380{
381 struct request_sock_queue *queue =
382 &inet_csk(sk)->icsk_accept_queue;
383
384 if (queue->fastopenq == NULL) {
385 queue->fastopenq = kzalloc(
386 sizeof(struct fastopen_queue),
387 sk->sk_allocation);
388 if (queue->fastopenq == NULL)
389 return -ENOMEM;
390
391 sk->sk_destruct = tcp_sock_destruct;
392 spin_lock_init(&queue->fastopenq->lock);
393 }
394 queue->fastopenq->max_qlen = backlog;
395 return 0;
396}
397
398#endif
399