#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

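/*
 * Never offer a window larger than 32767 without window scaling: some
 * remote stacks have been seen to treat the 16-bit window field as a
 * signed quantity, so staying below 2^15 keeps them working.
 */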
#define MAX_TCP_WINDOW 32767U

#define TCP_MIN_MSS 88U

#define TCP_BASE_MSS 512

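/*
 * Number of duplicate ACKs that triggers fast retransmit, i.e. the
 * classic RFC 5681 threshold of three.
 */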
#define TCP_FASTRETRANS_THRESH 3

#define TCP_MAX_REORDERING 127

#define TCP_MAX_QUICKACKS 16U

#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
#define TCP_URG_READ 0x0400

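/*
 * Retransmission thresholds: after TCP_RETR1 retransmissions the route
 * is re-validated (negative advice to the routing layer), while
 * TCP_RETR2 bounds how long an established connection keeps
 * retransmitting before it is aborted.  These are the defaults behind
 * the tcp_retries1/tcp_retries2 sysctls.
 */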
#define TCP_RETR1 3

#define TCP_RETR2 15

#define TCP_SYN_RETRIES 6

#define TCP_SYNACK_RETRIES 5

#define TCP_TIMEWAIT_LEN (60*HZ)

#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN

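/*
 * Delayed-ACK and retransmission timer bounds, all expressed in
 * jiffies.  TCP_DELACK_MIN and TCP_ATO_MIN fall back to fixed tick
 * counts when HZ is too coarse for HZ/25 to be meaningful.
 */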
#define TCP_DELACK_MAX ((unsigned)(HZ/5))
#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25))
#define TCP_ATO_MIN ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))

#define TCP_KEEPALIVE_TIME (120*60*HZ)
#define TCP_KEEPALIVE_PROBES 9
#define TCP_KEEPALIVE_INTVL (75*HZ)

#define MAX_TCP_KEEPIDLE 32767
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127

#define TCP_SYNQ_INTERVAL (HZ/5)

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60

#define TCP_PAWS_WINDOW 1

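/*
 * TCP option kinds as carried on the wire.  TCPOPT_EXP is the
 * experimental option kind that, together with TCPOPT_FASTOPEN_MAGIC,
 * identifies a Fast Open cookie option.
 */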
#define TCPOPT_NOP 1
#define TCPOPT_EOL 0
#define TCPOPT_MSS 2
#define TCPOPT_WINDOW 3
#define TCPOPT_SACK_PERM 4
#define TCPOPT_SACK 5
#define TCPOPT_TIMESTAMP 8
#define TCPOPT_MD5SIG 19
#define TCPOPT_EXP 254

#define TCPOPT_FASTOPEN_MAGIC 0xF989

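/*
 * On-the-wire lengths of the options above.  The *_ALIGNED variants are
 * the sizes actually consumed in the header once the option has been
 * padded out to a 32-bit boundary with NOPs.
 */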
#define TCPOLEN_MSS 4
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_EXP_FASTOPEN_BASE 4

#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_WSCALE_ALIGNED 4
#define TCPOLEN_SACKPERM_ALIGNED 4
#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4

#define TCP_NAGLE_OFF 1
#define TCP_NAGLE_CORK 2
#define TCP_NAGLE_PUSH 4

#define TCP_THIN_LINEAR_RETRIES 6

#define TCP_INIT_CWND 10

#define TFO_CLIENT_ENABLE 1
#define TFO_SERVER_ENABLE 2
#define TFO_CLIENT_NO_COOKIE 4

#define TFO_SERVER_COOKIE_NOT_CHKED 0x100

#define TFO_SERVER_COOKIE_NOT_REQD 0x200

#define TFO_SERVER_WO_SOCKOPT1 0x400
#define TFO_SERVER_WO_SOCKOPT2 0x800

#define TFO_SERVER_ALWAYS 0x1000

extern struct inet_timewait_death_row tcp_death_row;

extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

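/*
 * Sequence numbers are compared modulo 2^32: the signed difference is
 * negative exactly when seq1 precedes seq2 on the circular sequence
 * space.  For example, before(0xFFFFFFF0, 0x10) is true because the
 * space has wrapped and 0x10 lies ahead of 0xFFFFFFF0.
 */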
static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

extern bool tcp_check_oom(struct sock *sk, int shift);

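/*
 * Syncookies: when the SYN queue overflows, the time of the overflow is
 * recorded in the listener's ts_recent_stamp (otherwise unused on a
 * listening socket), so that SYN cookies are only honoured for a short
 * period after an actual overflow.
 */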
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_init_mem(struct net *net);

extern void tcp_tasklet_init(void);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern void tcp_v4_early_demux(struct sk_buff *skb);
extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern void tcp_release_cb(struct sock *sk);
extern void tcp_wfree(struct sk_buff *skb);
extern void tcp_write_timer_handler(struct sock *sk);
extern void tcp_delack_timer_handler(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
				const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;

			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
#define TCP_ECN_SEEN 8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev,
				  bool fastopen);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_init_metrics(struct sock *sk);
extern void tcp_metrics_init(void);
extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
extern bool tcp_remember_stamp(struct sock *sk);
extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
extern void tcp_disable_fack(struct tcp_sock *tp);
extern void tcp_close(struct sock *sk, long timeout);
extern void tcp_init_sock(struct sock *sk);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab, struct tcp_fastopen_cookie *foc);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);
extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				       struct request_sock *req,
				       struct tcp_fastopen_cookie *foc);
extern int tcp_disconnect(struct sock *sk, int flags);

void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
			     u32 cookie);
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
				     const struct tcphdr *th, u16 *mssp);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt,
				   struct net *net, bool *ecn_ok);

extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
			     u32 cookie);
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
				     const struct tcphdr *th, u16 *mssp);
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern bool tcp_may_send_now(struct sock *sk);
extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern bool tcp_syn_flood_action(struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
extern void tcp_send_loss_probe(struct sock *sk);
extern bool tcp_schedule_loss_probe(struct sock *sk);

extern void tcp_cwnd_application_limited(struct sock *sk);
extern void tcp_resume_early_retransmit(struct sock *sk);
extern void tcp_rearm_rto(struct sock *sk);
extern void tcp_reset(struct sock *sk);

extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

extern void tcp_get_info(const struct sock *, struct tcp_info *);

typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

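/*
 * srtt is stored scaled by 8, so this is the smoothed RTT plus the
 * variance term kept in rttvar (an RFC 6298 style RTO); tcp_bound_rto()
 * above caps the result at TCP_RTO_MAX.
 */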
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

extern void tcp_set_rto(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

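/*
 * The amount of data we can accept right now: the right edge of the
 * last advertised window minus what has already arrived beyond the
 * point that advertisement was based on.  Clamped at zero because
 * rcv_nxt can run past the advertised edge if the peer pushes more
 * data than we offered.
 */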
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

extern u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

#define tcp_time_stamp ((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

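/*
 * Per-packet control information that TCP keeps in skb->cb[] while a
 * segment sits on the send or receive queues: host-order sequence
 * numbers, the header flags, and SACK scoreboard state (the TCPCB_*
 * bits below).
 */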
struct tcp_skb_cb {
	union {
		struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;
#endif
	} header;
	__u32 seq;
	__u32 end_seq;
	__u32 when;
	__u8 tcp_flags;

	__u8 sacked;
#define TCPCB_SACKED_ACKED 0x01
#define TCPCB_SACKED_RETRANS 0x02
#define TCPCB_LOST 0x04
#define TCPCB_TAGBITS 0x07
#define TCPCB_EVER_RETRANS 0x80
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u8 ip_dsfield;

	__u32 ack_seq;
};

#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))

static inline void
TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
		       struct net *net)
{
	const struct tcphdr *th = tcp_hdr(skb);

	if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
		inet_rsk(req)->ecn_ok = 1;
}

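/*
 * With GSO/TSO a single skb can stand for several MSS-sized segments on
 * the wire: gso_segs is that segment count and gso_size the per-segment
 * payload, and the congestion accounting below works in these units.
 */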
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

enum tcp_ca_event {
	CA_EVENT_TX_START,
	CA_EVENT_CWND_RESTART,
	CA_EVENT_COMPLETE_CWR,
	CA_EVENT_LOSS,
	CA_EVENT_FAST_ACK,
	CA_EVENT_SLOW_ACK,
};

#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP 0x2

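/*
 * Interface for pluggable congestion control modules.  ssthresh() and
 * cong_avoid() are the core hooks, consulted on loss events and on
 * every ACK respectively; the remaining callbacks are optional
 * notifications (state changes, cwnd events, RTT samples) and
 * diagnostics exported via get_info().
 */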
struct tcp_congestion_ops {
	struct list_head list;
	unsigned long flags;

	void (*init)(struct sock *sk);

	void (*release)(struct sock *sk);

	u32 (*ssthresh)(struct sock *sk);

	u32 (*min_cwnd)(const struct sock *sk);

	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);

	void (*set_state)(struct sock *sk, u8 new_state);

	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	u32 (*undo_cwnd)(struct sock *sk);

	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);

	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char name[TCP_CA_NAME_MAX];
	struct module *owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
		sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

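/*
 * Best-effort estimate of the segments currently in the network:
 *
 *	in_flight = packets_out - (sacked_out + lost_out) + retrans_out
 *
 * i.e. everything sent but not yet fully ACKed, minus what we believe
 * has already left the network (SACKed or lost), plus retransmissions.
 */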
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH 0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

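/*
 * The slow-start threshold to report or remember right now: during
 * CWR/Recovery the real snd_ssthresh is used, otherwise at least 3/4 of
 * the current cwnd so a later update never records less than that.
 */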
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

extern u32 tcp_default_init_rwnd(u32 mss);

extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

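/*
 * sysctl_tcp_adv_win_scale splits receive buffer space between window
 * and application/metadata overhead: a positive scale reserves
 * space/2^scale for overhead and advertises the rest, while a
 * non-positive scale advertises only space/2^(-scale).
 */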
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
	tcp_rsk(req)->snt_synack = 0;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
		     tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

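/*
 * PAWS (RFC 1323/7323): a segment's timestamp must not be older than
 * the last one we accepted (ts_recent), within paws_win ticks of slack.
 * If ts_recent itself is ancient (more than ~24 days, approaching the
 * 32-bit timestamp wrap) or was never set, the check is waived.
 */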
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;

	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

static inline void tcp_mib_init(struct net *net)
{
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

struct crypto_hash;

union tcp_md5_addr {
	struct in_addr a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr a6;
#endif
};

struct tcp_md5sig_key {
	struct hlist_node node;
	u8 keylen;
	u8 family;
	union tcp_md5_addr addr;
	u8 key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head rcu;
};

struct tcp_md5sig_info {
	struct hlist_head head;
	struct rcu_head rcu;
};

struct tcp4_pseudohdr {
	__be32 saddr;
	__be32 daddr;
	__u8 pad;
	__u8 protocol;
	__be16 len;
};

struct tcp6_pseudohdr {
	struct in6_addr saddr;
	struct in6_addr daddr;
	__be32 len;
	__be32 protocol;
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

struct tcp_md5sig_pool {
	struct hash_desc md5_desc;
	union tcp_md5sum_block md5_blk;
};

extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
			  int family, const u8 *newkey,
			  u8 newkeylen, gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
			  int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					const union tcp_md5_addr *addr,
					int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk) NULL
#endif

extern bool tcp_alloc_md5sig_pool(void);

extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
				 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    const struct tcp_md5sig_key *key);

extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
				   struct tcp_fastopen_cookie *cookie,
				   int *syn_loss, unsigned long *last_syn_loss);
extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
				   struct tcp_fastopen_cookie *cookie,
				   bool syn_lost);
struct tcp_fastopen_request {
	struct tcp_fastopen_cookie cookie;
	struct msghdr *data;
	u16 copied;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);

extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
				    struct tcp_fastopen_cookie *foc);

#define TCP_FASTOPEN_KEY_LENGTH 16

struct tcp_fastopen_context {
	struct crypto_cipher *tfm;
	__u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct rcu_head rcu;
};

static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk) \
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk) \
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline bool tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

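/*
 * A stream is "thin" when it has fewer than four segments in flight and
 * is past initial slow start, i.e. it never accumulates enough
 * duplicate ACKs for fast retransmit; such flows may opt into the
 * thin-stream linear timeout and dupACK handling controlled by the
 * sysctls above.
 */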
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
	char *name;
	sa_family_t family;
	const struct file_operations *seq_fops;
	struct seq_operations seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private p;
	sa_family_t family;
	enum tcp_seq_states state;
	struct sock *syn_wait_sk;
	int bucket, offset, sbucket, num;
	kuid_t uid;
	loff_t last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				       netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);

extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
				__be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
}

static inline bool tcp_stream_memory_free(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;

	return notsent_bytes < tcp_notsent_lowat(tp);
}

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
					      struct sock *addr_sk);
	int (*calc_md5_hash) (char *location,
			      struct tcp_md5sig_key *md5,
			      const struct sock *sk,
			      const struct request_sock *req,
			      const struct sk_buff *skb);
	int (*md5_parse) (struct sock *sk,
			  char __user *optval,
			  int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
					      struct request_sock *req);
	int (*calc_md5_hash) (char *location,
			      struct tcp_md5sig_key *md5,
			      const struct sock *sk,
			      const struct request_sock *req,
			      const struct sk_buff *skb);
#endif
};

extern int tcpv4_offload_init(void);

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif /* _TCP_H */