/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP protocol.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H


#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <uapi/linux/tcp.h>

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}

/* TCP header length in bytes, taken from the data offset (doff) field. */
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return tcp_hdr(skb)->doff * 4;
}

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_inner_transport_header(skb);
}

static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
	return inner_tcp_hdr(skb)->doff * 4;
}

/* Bytes of TCP options: header length minus the 20-byte fixed header. */
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
	return (tcp_hdr(skb)->doff - 5) * 4;
}
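
/*
 * Illustrative sketch (not part of the original header): the helpers above
 * assume skb->transport_header already points at the TCP header, as set up by
 * the IPv4/IPv6 receive path.  The variable names below are hypothetical:
 *
 *	const struct tcphdr *th = tcp_hdr(skb);
 *	unsigned int hdr_bytes = tcp_hdrlen(skb);	// 20..60, from th->doff
 *	unsigned int opt_bytes = tcp_optlen(skb);	// 0..40 bytes of options
 *
 *	if (hdr_bytes < sizeof(struct tcphdr))
 *		goto drop;	// malformed segment: doff below the minimum
 */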


/* TCP Fast Open */
#define TCP_FASTOPEN_COOKIE_MIN		4	/* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX		16	/* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE	8	/* the size employed by this impl. */

/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
	s8	len;
	u8	val[TCP_FASTOPEN_COOKIE_MAX];
};
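
/*
 * Minimal sketch (illustrative only): a received Fast Open cookie is usable
 * only if its length falls within the bounds above.  'foc' is a hypothetical
 * pointer to a parsed struct tcp_fastopen_cookie:
 *
 *	if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
 *	    foc->len <= TCP_FASTOPEN_COOKIE_MAX)
 *		; // a cookie of valid size is present
 *	else if (foc->len == 0)
 *		; // the option carried no cookie
 */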


/* These define a selective acknowledgement (SACK) block, in network
 * byte order (on the wire) and in host byte order respectively.
 */
struct tcp_sack_block_wire {
	__be32	start_seq;
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};
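
/*
 * Illustrative sketch, not taken from this header: converting a SACK block
 * received on the wire into host byte order.  'wire' and 'sack' are
 * hypothetical variables; an option parser may need get_unaligned_be32()
 * instead when the option is not suitably aligned:
 *
 *	struct tcp_sack_block sack;
 *
 *	sack.start_seq = ntohl(wire->start_seq);
 *	sack.end_seq   = ntohl(wire->end_seq);
 */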

/* These are used to set the sack_ok field in struct tcp_options_received */
#define TCP_SACK_SEEN     (1 << 0)   /* 1 = peer is SACK capable */
#define TCP_FACK_ENABLED  (1 << 1)   /* 1 = FACK is enabled locally */
#define TCP_DSACK_SEEN    (1 << 2)   /* 1 = DSACK was received from peer */

struct tcp_options_received {
/*	PAWS/RTTM data	*/
	long	ts_recent_stamp;/* Time we stored ts_recent (for aging)	*/
	u32	ts_recent;	/* Time stamp to echo next		*/
	u32	rcv_tsval;	/* Time stamp value			*/
	u32	rcv_tsecr;	/* Time stamp echo reply		*/
	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
		dsack : 1,	/* D-SACK is scheduled			*/
		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
		sack_ok : 4,	/* SACK seen on SYN packet		*/
		snd_wscale : 4,	/* Window scaling received from sender	*/
		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
	u8	num_sacks;	/* Number of SACK blocks		*/
	u16	user_mss;	/* mss requested by user in ioctl	*/
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
};

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
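
/*
 * Sketch of typical usage (illustrative, not part of this header): received
 * options are parsed into a cleared struct tcp_options_received.  The parser
 * tcp_parse_options() and TCP_MSS_DEFAULT come from net/tcp.h; the exact
 * argument list differs between kernel versions, so treat this as a sketch:
 *
 *	struct tcp_options_received tmp_opt;
 *
 *	tcp_clear_options(&tmp_opt);
 *	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
 *	tcp_parse_options(skb, &tmp_opt, 0, NULL);
 *	if (tmp_opt.saw_tstamp)
 *		; // the segment carried a TCP timestamp option
 */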


/* This is the max number of SACKS that we'll generate and process. It's safe
 * to increase this, although since:
 *   size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
 * only four options will fit in a standard TCP header.
 */
#define TCP_NUM_SACKS 4

struct tcp_request_sock_ops;

struct tcp_request_sock {
	struct inet_request_sock	req;
	const struct tcp_request_sock_ops *af_specific;
	struct sock			*listener; /* needed for TFO */
	u32				rcv_isn;
	u32				snt_isn;
	u32				snt_synack; /* synack sent time */
	u32				rcv_nxt; /* the ack # by SYNACK. For
						  * FastOpen it's the seq#
						  * after data-in-SYN.
						  */
};

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	return (struct tcp_request_sock *)req;
}
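
/*
 * Illustrative sketch: tcp_rsk() is simply a downcast, valid because struct
 * tcp_request_sock embeds struct inet_request_sock (whose first member is
 * struct request_sock) as its first member.  'req' is a hypothetical pointer
 * to a request_sock created for a TCP connection:
 *
 *	struct tcp_request_sock *treq = tcp_rsk(req);
 *	u32 isn_delta = treq->snt_isn - treq->rcv_isn;	// purely illustrative
 */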

struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock	inet_conn;
	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
	u16	gso_segs;	/* Max number of segs per GSO packet	*/

/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
	__be32	pred_flags;

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
	u32	rcv_nxt;	/* What we want to receive next		*/
	u32	copied_seq;	/* Head of yet unread data		*/
	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	u32	snd_nxt;	/* Next sequence we send		*/

	u32	snd_una;	/* First byte we want an ack for	*/
	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */

	u32	tsoffset;	/* timestamp offset */

	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
	unsigned long	tsq_flags;

	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;
		struct task_struct	*task;
		struct msghdr		*msg;
		int			memory;
		int			len;
	} ucopy;

	u32	snd_wl1;	/* Sequence for window update		*/
	u32	snd_wnd;	/* The window we expect to receive	*/
	u32	max_window;	/* Maximal window ever seen from peer	*/
	u32	mss_cache;	/* Cached effective mss, not including SACKS */

	u32	window_clamp;	/* Maximal window to advertise		*/
	u32	rcv_ssthresh;	/* Current window clamp			*/

	u16	advmss;		/* Advertised MSS			*/
	u8	unused;
	u8	nonagle     : 4,/* Disable Nagle algorithm?		*/
		thin_lto    : 1,/* Use linear timeouts for thin streams	*/
		thin_dupack : 1,/* Fast retransmit on first dupack	*/
		repair      : 1,
		frto        : 1;/* F-RTO (RFC5682) activated in CA_Loss	*/
	u8	repair_queue;
	u8	do_early_retrans:1,/* Enable RFC5827 early-retransmit	*/
		syn_data:1,	/* SYN includes data			*/
		syn_fastopen:1,	/* SYN includes Fast Open option	*/
		syn_data_acked:1,/* data in SYN is acked by SYN-ACK	*/
		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
	u32	tlp_high_seq;	/* snd_nxt at the time of TLP retransmit. */

/* RTT measurement */
	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32	mdev_us;	/* mean deviation			*/
	u32	mdev_max_us;	/* maximal mdev for the last rtt period	*/
	u32	rttvar_us;	/* smoothed mdev_max			*/
	u32	rtt_seq;	/* sequence number to update rttvar	*/

	u32	packets_out;	/* Packets which are "in flight"	*/
	u32	retrans_out;	/* Retransmitted packets out		*/
	u32	max_packets_out;  /* max packets_out in last window	*/
	u32	max_packets_seq;  /* right edge of max_packets_out flight */

	u16	urg_data;	/* Saved octet of OOB data and control flags */
	u8	ecn_flags;	/* ECN status bits.			*/
	u8	keepalive_probes; /* num of allowed keep alive probes	*/
	u32	reordering;	/* Packet reordering metric.		*/
	u32	snd_up;		/* Urgent pointer			*/

/*
 *	Options received (usually on last packet, some only on SYN packets).
 */
	struct tcp_options_received rx_opt;

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	u32	snd_ssthresh;	/* Slow start size threshold		*/
	u32	snd_cwnd;	/* Sending congestion window		*/
	u32	snd_cwnd_cnt;	/* Linear increase counter		*/
	u32	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
	u32	prior_cwnd;	/* Congestion window at start of Recovery. */
	u32	prr_delivered;	/* Number of newly delivered packets to
				 * receiver in Recovery. */
	u32	prr_out;	/* Total number of pkts sent during Recovery. */

	u32	rcv_wnd;	/* Current receiver window		*/
	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	u32	lost_out;	/* Lost packets				*/
	u32	sacked_out;	/* SACK'd packets			*/
	u32	fackets_out;	/* FACK'd packets			*/
	u32	tso_deferred;

	/* from STCP, retrans queue hinting */
	struct sk_buff *lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	/* OOO segments go in this list.  Note that this list is
	 * ordered by sequence number.
	 */
	struct sk_buff_head	out_of_order_queue;

	/* SACKs data, these 2 need to be together (see tcp_options_write) */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves */

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;	/* skb just after the highest
					 * skb with SACKed bit set
					 * (validity guaranteed only if
					 * sacked_out > 0)
					 */

	int	lost_cnt_hint;
	u32	retransmit_high;	/* L-bits may be on up to this seqno */

	u32	lost_retrans_low;	/* Sent seq after any rxmit (lowest) */

	u32	prior_ssthresh;	/* ssthresh saved at recovery start	*/
	u32	high_seq;	/* snd_nxt at onset of congestion	*/

	u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	u32	undo_marker;	/* tracking retrans started here. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	u32	total_retrans;	/* Total retransmits for entire connection */

	u32	urg_seq;	/* Seq of received urgent pointer */
	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */

	int			linger2;

/* Receiver side RTT estimation */
	struct {
		u32	rtt;
		u32	seq;
		u32	time;
	} rcv_rtt_est;

/* Receiver queue space */
	struct {
		int	space;
		u32	seq;
		u32	time;
	} rcvq_space;

/* TCP-specific MTU probe information. */
	struct {
		u32		  probe_seq_start;
		u32		  probe_seq_end;
	} mtu_probe;
	u32	mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
			   * while socket was owned by user.
			   */


#ifdef CONFIG_TCP_MD5SIG
	/* TCP AF-Specific parts; only used by MD5 Signature support so far */
	const struct tcp_sock_af_ops	*af_specific;

	/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info	__rcu *md5sig_info;
#endif

	/* TCP fastopen related information */
	struct tcp_fastopen_request *fastopen_req;
	/* fastopen_rsk points to request_sock that resulted in this big
	 * socket. Used to retransmit SYNACKs etc.
	 */
	struct request_sock *fastopen_rsk;
};

enum tsq_flags {
	TSQ_THROTTLED,
	TSQ_QUEUED,
	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
	TCP_WRITE_TIMER_DEFERRED,  /* tcp_write_timer() found socket was owned */
	TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
	TCP_MTU_REDUCED_DEFERRED,  /* tcp_v{4|6}_err() could not call
				    * tcp_v{4|6}_mtu_reduced()
				    */
};
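
/*
 * Illustrative sketch: the tsq_flags values are bit numbers used with the
 * atomic bitops on tcp_sock.tsq_flags (an unsigned long).  'tp' is a
 * hypothetical struct tcp_sock pointer:
 *
 *	if (!test_and_set_bit(TSQ_THROTTLED, &tp->tsq_flags))
 *		; // first time this socket is throttled by TCP Small Queues
 */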

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}
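
/*
 * Illustrative sketch: like the other *_sk() helpers, tcp_sk() is a downcast
 * that is only valid for TCP sockets (struct tcp_sock begins with struct
 * inet_connection_sock, which ultimately begins with struct sock).  Typical
 * read-only use, e.g. from a congestion control module; the derived values
 * below are hypothetical:
 *
 *	const struct tcp_sock *tp = tcp_sk(sk);
 *	u32 cwnd_bytes = tp->snd_cwnd * tp->mss_cache;
 *	u32 srtt_usecs = tp->srtt_us >> 3;	// srtt_us stores srtt << 3
 */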

struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
	u32			  tw_rcv_nxt;
	u32			  tw_snd_nxt;
	u32			  tw_rcv_wnd;
	u32			  tw_ts_offset;
	u32			  tw_ts_recent;
	long			  tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	  *tw_md5_key;
#endif
};

static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}

static inline bool tcp_passive_fastopen(const struct sock *sk)
{
	return (sk->sk_state == TCP_SYN_RECV &&
		tcp_sk(sk)->fastopen_rsk != NULL);
}
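
/*
 * Illustrative note: tcp_passive_fastopen() is true only for a server-side
 * child socket created from a Fast Open SYN; such a socket can already queue
 * data while still in TCP_SYN_RECV, before the 3-way handshake completes.
 * Hypothetical usage:
 *
 *	if (tcp_passive_fastopen(sk))
 *		; // handshake not finished, but data may already be queued
 */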

extern void tcp_sock_destruct(struct sock *sk);

/* Allocate the listener's Fast Open queue on first use and cap its length
 * at the listen() backlog.
 */
static inline int fastopen_init_queue(struct sock *sk, int backlog)
{
	struct request_sock_queue *queue =
			&inet_csk(sk)->icsk_accept_queue;

	if (queue->fastopenq == NULL) {
		queue->fastopenq = kzalloc(
				sizeof(struct fastopen_queue),
				sk->sk_allocation);
		if (queue->fastopenq == NULL)
			return -ENOMEM;

		sk->sk_destruct = tcp_sock_destruct;
		spin_lock_init(&queue->fastopenq->lock);
	}
	queue->fastopenq->max_qlen = backlog;
	return 0;
}
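
/*
 * Illustrative sketch: this helper is intended for the listen()/TCP_FASTOPEN
 * setsockopt paths, since the queue must exist before Fast Open SYNs can be
 * accepted.  A simplified, hypothetical call site:
 *
 *	int err = fastopen_init_queue(sk, backlog);
 *
 *	if (err)
 *		return err;	// -ENOMEM if the queue allocation failed
 */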

#endif	/* _LINUX_TCP_H */