#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/* Never offer a window over 32767 without using window scaling.
 * Some poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW 32767U

/* Minimal accepted MSS. It is (60 + 60 + 8) - (20 + 20). */
#define TCP_MIN_MSS 88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS 1024

/* Probing interval for MTU probing, in seconds (10 minutes, per RFC 4821) */
#define TCP_PROBE_INTERVAL 600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD 8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS 16U

/* Flags in tp->urg_data */
#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
#define TCP_URG_READ 0x0400

/* This is how many retries it does before it tries to figure out if the
 * gateway is down.  Minimal RFC value is 3; it corresponds to ~3 sec - 8 min
 * depending on RTO.
 */
#define TCP_RETR1 3

/* This should take at least 90 minutes to time out.  RFC 1122 says that the
 * limit is 100 sec; 15 retries is ~13-30 min depending on RTO.
 */
#define TCP_RETR2 15

/* How many retries are done when actively opening a connection.  RFC 1122
 * says the minimum retry MUST be at least 180 secs; with the current initial
 * RTO this corresponds to ~63 secs of retransmission.
 */
#define TCP_SYN_RETRIES 6

/* How many retries are done when passively opening a connection (SYN-ACK
 * retransmissions); ~31 secs with the current initial RTO.
 */
#define TCP_SYNACK_RETRIES 5

#define TCP_TIMEWAIT_LEN (60*HZ)	/* how long to wait to destroy TIME-WAIT
					 * state, about 60 seconds
					 */
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
					/* BSD style FIN_WAIT2 deadlock breaker.
					 * It used to be 3 min, new value is 60 sec,
					 * to combine FIN-WAIT-2 timeout with
					 * TIME-WAIT timer.
					 */

#define TCP_DELACK_MAX ((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC 6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))	/* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME (120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES 9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL (75*HZ)

#define MAX_TCP_KEEPIDLE 32767
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127

#define TCP_SYNQ_INTERVAL (HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60		/* Per-host timestamps are invalidated
				 * after this time. It should be equal
				 * (or greater than) TCP_TIMEWAIT_LEN
				 * to provide reliability equal to one
				 * provided by timewait state.
				 */
#define TCP_PAWS_WINDOW 1	/* Replay window for per-host
				 * timestamps. Must be less than
				 * minimal timewait lifetime.
				 */

/*
 *	TCP option
 */
#define TCPOPT_NOP 1		/* Padding */
#define TCPOPT_EOL 0		/* End of options */
#define TCPOPT_MSS 2		/* Segment size negotiating */
#define TCPOPT_WINDOW 3		/* Window scaling */
#define TCPOPT_SACK_PERM 4	/* SACK Permitted */
#define TCPOPT_SACK 5		/* SACK Block */
#define TCPOPT_TIMESTAMP 8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19	/* MD5 Signature (RFC 2385) */
#define TCPOPT_FASTOPEN 34	/* Fast open (RFC 7413) */
#define TCPOPT_EXP 254		/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options; used by the experimental form of TCP Fast Open.
 */
#define TCPOPT_FASTOPEN_MAGIC 0xF989

/*
 *	TCP option lengths
 */
#define TCPOLEN_MSS 4
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_WSCALE_ALIGNED 4
#define TCPOLEN_SACKPERM_ALIGNED 4
#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1		/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK 2	/* Socket is corked */
#define TCP_NAGLE_PUSH 4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per RFC 6928 */
#define TCP_INIT_CWND 10

/* Bit Flags for sysctl_tcp_fastopen */
#define TFO_CLIENT_ENABLE 1
#define TFO_SERVER_ENABLE 2
#define TFO_CLIENT_NO_COOKIE 4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD 0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define TFO_SERVER_WO_SOCKOPT1 0x400

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;

        return tcp_memory_pressure;
}

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */
static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

/* is s2 <= s1 <= s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}
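
/* Note: these comparisons are done in modular 32-bit arithmetic, so they
 * stay correct across sequence number wraparound.  A worked example: with
 * seq1 = 0xfffffff0 and seq2 = 0x00000010 the signed difference
 * (__s32)(seq1 - seq2) is -32, so before(0xfffffff0, 0x00000010) is true
 * even though seq1 is numerically larger.
 */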

static inline bool tcp_out_of_memory(struct sock *sk)
{
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
        int orphans = percpu_counter_read_positive(ocp);

        if (orphans << shift > sysctl_tcp_max_orphans) {
                orphans = percpu_counter_sum_positive(ocp);
                if (orphans << shift > sysctl_tcp_max_orphans)
                        return true;
        }
        return false;
}

bool tcp_check_oom(struct sock *sk, int shift);

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
                 int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ack.quick) {
                if (pkts >= icsk->icsk_ack.quick) {
                        icsk->icsk_ack.quick = 0;
                        /* Leaving quickack mode we deflate ATO. */
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                } else
                        icsk->icsk_ack.quick -= pkts;
        }
}

#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
#define TCP_ECN_SEEN 8

enum tcp_tw_status {
        TCP_TW_SUCCESS = 0,
        TCP_TW_RST = 1,
        TCP_TW_ACK = 2,
        TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                              struct sk_buff *skb,
                                              const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
                        bool paws_check, bool timestamps);
bool tcp_remember_stamp(struct sock *sk);
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
                      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx,
                       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst,
                                  struct request_sock *req_unhash,
                                  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
        TCP_SYNACK_NORMAL,
        TCP_SYNACK_FASTOPEN,
        TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
                                enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                 struct request_sock *req,
                                 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash for generating the cookie value
 * and to check whether a syncookie falls within the valid time frame
 * (TCP_SYNCOOKIE_VALID, i.e. the last two minutes).
 */
#define MAX_SYNCOOKIE_AGE 2
#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow,
 * but do not dirty this field too often (once per second is enough).
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        unsigned long now = jiffies;

        if (time_after(now, last_overflow + HZ))
                tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

        return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
        u64 val = get_jiffies_64();

        do_div(val, TCP_SYNCOOKIE_PERIOD);
        return val;
}
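
/* A quick sanity check of the arithmetic above: at HZ == 1000,
 * TCP_SYNCOOKIE_PERIOD is 60000 jiffies, so get_jiffies_64() == 600000
 * gives a counter value of 10.  The counter advances once per minute and,
 * per TCP_SYNCOOKIE_VALID, cookies are honoured for at most two such
 * periods (roughly two minutes).
 */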

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
                   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
                              const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif

/* tcp_output.c */

u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
                     int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                               int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
                             const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        int cutoff;

        /* When peer uses tiny windows, there is no use in packetizing
         * to sub-MSS pieces for the sake of SWS or making sure there
         * are enough packets in the pipe for fast recovery.
         *
         * On the other hand, for extremely large MSS devices, handling
         * smaller than MSS windows in this way does make sense.
         */
        if (tp->max_window > TCP_MSS_DEFAULT)
                cutoff = (tp->max_window >> 1);
        else
                cutoff = tp->max_window;

        if (cutoff && pktsize > cutoff)
                return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
        return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
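
/* tp->srtt_us holds the smoothed RTT left-shifted by 3 (8 * SRTT) and
 * tp->rttvar_us already carries the scaled variance term, so the sum above
 * roughly follows the RFC 6298 shape SRTT + 4 * RTTVAR (both fields are
 * maintained by the RTT estimator in tcp_input.c).  Example: srtt_us = 800000
 * (a 100 ms smoothed RTT) plus rttvar_us = 200000 yields a 300 ms RTO, well
 * below the TCP_RTO_MAX clamp applied by tcp_bound_rto().
 */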

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
                tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
        const struct dst_entry *dst = __sk_dst_get(sk);
        u32 rto_min = TCP_RTO_MIN;

        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
        return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
        return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
        return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

        if (win < 0)
                win = 0;
        return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp ((__u32)(jiffies))

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
        return skb->skb_mstamp.stamp_jiffies;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
        __u32 seq;		/* Starting sequence number	*/
        __u32 end_seq;		/* SEQ + FIN + SYN + datalen	*/
        union {
                /* Note: tcp_tw_isn is used in the input path only
                 * (isn chosen by tcp_timewait_state_process()),
                 * while tcp_gso_segs/size are used in the write queue only,
                 * cf. tcp_skb_pcount()/tcp_skb_mss().
                 */
                __u32 tcp_tw_isn;
                struct {
                        u16 tcp_gso_segs;
                        u16 tcp_gso_size;
                };
        };
        __u8 tcp_flags;		/* TCP header flags (tcp[13]) */

        __u8 sacked;		/* State flags for SACK */
#define TCPCB_SACKED_ACKED 0x01		/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS 0x02	/* SKB retransmitted		*/
#define TCPCB_LOST 0x04			/* SKB is lost			*/
#define TCPCB_TAGBITS 0x07		/* All tag bits			*/
#define TCPCB_REPAIRED 0x10		/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS 0x80		/* Ever retransmitted frame	*/
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
                       TCPCB_REPAIRED)

        __u8 ip_dsfield;	/* IPv4 tos or IPv6 dsfield */
        __u8 txstamp_ack:1,	/* Record TX timestamp for ack? */
             eor:1,		/* Is skb MSG_EOR marked? */
             unused:6;
        __u32 ack_seq;		/* Sequence number ACK'd */
        union {
                struct {
                        /* bytes in flight when this packet was sent */
                        __u32 in_flight:30,
                              is_app_limited:1, /* cwnd not fully used? */
                              unused:1;
                        /* pkts S/ACKed so far upon tx of skb, incl retrans: */
                        __u32 delivered;
                        /* start of send pipeline phase */
                        struct skb_mstamp first_tx_mstamp;
                        /* when we reached the "delivered" count */
                        struct skb_mstamp delivered_mstamp;
                } tx;	/* only used for outgoing skbs */
                union {
                        struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
                        struct inet6_skb_parm h6;
#endif
                } header;	/* For incoming skbs */
        };
};

#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[].
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

        return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
            skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
                return true;
#endif
        return false;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
        return likely(!TCP_SKB_CB(skb)->eor);
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
        CA_EVENT_TX_START,	/* first transmit when no packets in flight */
        CA_EVENT_CWND_RESTART,	/* congestion window restart */
        CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
        CA_EVENT_LOSS,		/* loss timeout */
        CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
        CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
        CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
        CA_ACK_SLOWPATH = (1 << 0),	/* In slow path processing */
        CA_ACK_WIN_UPDATE = (1 << 1),	/* ACK updated window */
        CA_ACK_ECE = (1 << 2),		/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC 0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2

union tcp_cc_info;

struct ack_sample {
        u32 pkts_acked;
        s32 rtt_us;
        u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
        struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
        u32 prior_delivered;	/* tp->delivered at "prior_mstamp" */
        s32 delivered;		/* number of packets delivered over interval */
        long interval_us;	/* time for tp->delivered to incr "delivered" */
        long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
        int losses;		/* number of packets marked lost upon ACK */
        u32 acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
        u32 prior_in_flight;	/* in flight before this ACK */
        bool is_app_limited;	/* is sample from packet with bubble in pipe? */
        bool is_retrans;	/* is sample from retransmission? */
};

struct tcp_congestion_ops {
        struct list_head list;
        u32 key;
        u32 flags;

        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
        /* cleanup private data (optional) */
        void (*release)(struct sock *sk);

        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
        /* new value of cwnd after loss (required) */
        u32 (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
        /* suggest number of segments for each skb to transmit (optional) */
        u32 (*tso_segs_goal)(struct sock *sk);
        /* returns the multiplier used in tcp_sndbuf_expand (optional) */
        u32 (*sndbuf_expand)(struct sock *sk);
        /* call when packets are delivered to update cwnd and pacing rate,
         * after all the ca_state processing (optional).
         */
        void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
        /* get info for inet_diag (optional) */
        size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
                           union tcp_cc_info *info);

        char name[TCP_CA_NAME_MAX];
        struct module *owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
        return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->set_state)
                icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cwnd_event)
                icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                  struct skb_mstamp *now, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
        return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        tp->do_early_retrans = sysctl_tcp_early_retrans &&
                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
                net->ipv4.sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
        tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
        return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  We can count:
 *
 *	packets_out	packets we have sent out and the peer has not ACKed yet
 *	retrans_out	retransmitted packets among packets_out
 *	sacked_out	packets the receiver reported as SACKed (or, without
 *			SACK, an estimate derived from duplicate ACKs)
 *	lost_out	packets we consider lost (a heuristic guess)
 *
 *	left_out = sacked_out + lost_out
 *
 * so in_flight = packets_out - left_out + retrans_out.
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH 0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
        return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
        return (TCPF_CA_CWR | TCPF_CA_Recovery) &
               (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the cwnd reduction phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_in_cwnd_reduction(sk))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
                           ((tp->snd_cwnd >> 1) +
                            (tp->snd_cwnd >> 2)));
}
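
/* Outside of CWR/Recovery the value reported above is never below 3/4 of the
 * current congestion window, since (cwnd >> 1) + (cwnd >> 2) is cwnd/2 +
 * cwnd/4.  For example, snd_cwnd = 40 and snd_ssthresh = 20 give
 * max(20, 30) = 30.
 */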

/* tp->packets_out is supposed to always be >= left_out */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
        return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
        return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC 2861 to validate cwnd but implement a more
 * flexible approach.  The RFC suggests cwnd should not be raised unless
 * it was fully used previously, and that is exactly what we do in
 * congestion-avoidance mode.  But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example:
 *    cwnd is 10 (IW10), but the application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it is as aggressive as slow start, which already
 * risks 100% overshoot.  The advantage is that we discourage applications from
 * sending filler packets to artificially blow up cwnd usage, and allow
 * application-limited processes to probe bandwidth more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
        if (tcp_in_slow_start(tp))
                return tp->snd_cwnd < 2 * tp->max_packets_out;

        return tp->is_cwnd_limited;
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms).
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
        return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
                                            unsigned long max_when)
{
        u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

        return (unsigned long)min_t(u64, when, max_when);
}
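
/* The zero-window probe timer thus backs off exponentially from
 * tcp_probe0_base() (the RTO, floored at TCP_RTO_MIN) up to max_when.
 * For example, with a 200 ms base and icsk_backoff == 3 the next probe
 * fires after 200 ms << 3 = 1.6 s, unless max_when is smaller.
 */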

static inline void tcp_check_probe_timer(struct sock *sk)
{
        if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
                                   __be32 daddr, __wsum base)
{
        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
        return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */
static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
        tp->ucopy.task = NULL;
        tp->ucopy.len = 0;
        tp->ucopy.memory = 0;
        skb_queue_head_init(&tp->ucopy.prequeue);
}

bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
        "Unused", "Established", "Syn Sent", "Syn Recv",
        "Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
        "Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
void tcp_set_state(struct sock *sk, int state);

void tcp_done(struct sock *sk);

int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
        rx_opt->dsack = 0;
        rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta;

        if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
                return;
        delta = tcp_time_stamp - tp->lsndtime;
        if (delta > inet_csk(sk)->icsk_rto)
                tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
                               __u32 *window_clamp, int wscale_ok,
                               __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
        return sysctl_tcp_adv_win_scale <= 0 ?
                (space >> (-sysctl_tcp_adv_win_scale)) :
                space - (space >> sysctl_tcp_adv_win_scale);
}
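
/* sysctl_tcp_adv_win_scale expresses how much of the receive buffer is
 * reserved for overhead rather than advertised window.  With a positive
 * scale the window is space - space/2^scale: for space = 64 KB, scale 1
 * advertises 32 KB and scale 2 advertises 48 KB.  A scale <= 0 instead
 * advertises space/2^(-scale).
 */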

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf -
                                  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
        const struct inet_connection_sock *icsk = &tp->inet_conn;

        return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
                     tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
        int fin_timeout = tcp_sk(sk)->linger2 ? :
                sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;

        if (fin_timeout < (rto << 2) - (rto >> 1))
                fin_timeout = (rto << 2) - (rto >> 1);

        return fin_timeout;
}
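
/* The FIN_WAIT2/linger2 timeout is never allowed to drop below
 * (rto << 2) - (rto >> 1), i.e. 3.5 * RTO.  For example, with a 1 s RTO the
 * effective fin_timeout is at least 3.5 s even if tcp_fin_timeout (or the
 * TCP_LINGER2 socket option) is set lower.
 */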

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
                                  int paws_win)
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return true;
        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
                return true;
        /* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
         * then following tcp messages have valid values.  Ignore any PAWS
         * issue if we have never seen a timestamp from this peer.
         */
        if (!rx_opt->ts_recent)
                return true;
        return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
                                   int rst)
{
        if (tcp_paws_check(rx_opt, 0))
                return false;

        /* RST segments are not recommended to carry timestamps, and, if they
         * do, it is recommended to ignore PAWS because "their cleanup
         * function should take precedence over timestamps".  That is risky:
         * if the peer reboots, its clock may go out of sync and half-open
         * connections would never be reset.  As a compromise, a PAWS-failing
         * RST is still honoured once the stored per-host timestamp is at
         * least TCP_PAWS_MSL seconds old.
         */
        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
                return false;
        return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
                          int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
        /* See RFC 2012 */
        TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
        TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
        TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
        TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
        tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
        tcp_clear_retrans_hints_partial(tp);
        tp->retransmit_skb_hint = NULL;
}

union tcp_md5_addr {
        struct in_addr a4;
#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
        struct hlist_node node;
        u8 keylen;
        u8 family;		/* AF_INET or AF_INET6 */
        union tcp_md5_addr addr;
        u8 key[TCP_MD5SIG_MAXKEYLEN];
        struct rcu_head rcu;
};

/* - sock block */
struct tcp_md5sig_info {
        struct hlist_head head;
        struct rcu_head rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
        __be32 saddr;
        __be32 daddr;
        __u8 pad;
        __u8 protocol;
        __be16 len;
};

struct tcp6_pseudohdr {
        struct in6_addr saddr;
        struct in6_addr daddr;
        __be32 len;
        __be32 protocol;	/* including padding */
};

union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm request and scratch buffer */
struct tcp_md5sig_pool {
        struct ahash_request *md5_req;
        void *scratch;
};

/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
                   int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                         const union tcp_md5_addr *addr,
                                         int family);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                                       const union tcp_md5_addr *addr,
                                                       int family)
{
        return NULL;
}
#define tcp_twsk_md5_key(twsk) NULL
#endif

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
        local_bh_enable();
}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
                          unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie, int *syn_loss,
                            unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost,
                            u16 try_exp);
struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie cookie;
        struct msghdr *data;	/* data in MSG_FASTOPEN */
        size_t size;
        int copied;		/* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);

extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              struct dst_entry *dst);
void tcp_fastopen_init_key_once(bool publish);
#define TCP_FASTOPEN_KEY_LENGTH 16

/* Fastopen key context */
struct tcp_fastopen_context {
        struct crypto_cipher *tfm;
        __u8 key[TCP_FASTOPEN_KEY_LENGTH];
        struct rcu_head rcu;
};

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
        TCP_CHRONO_UNSPEC,
        TCP_CHRONO_BUSY,		/* Actively sending data (non-empty write queue) */
        TCP_CHRONO_RWND_LIMITED,	/* Stalled by insufficient receive window */
        TCP_CHRONO_SNDBUF_LIMITED,	/* Stalled by insufficient send buffer */
        __TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
        struct sk_buff *skb;

        tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                sk_wmem_free_skb(sk, skb);
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
        return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
        return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
                                                   const struct sk_buff *skb)
{
        return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
                                                   const struct sk_buff *skb)
{
        return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk) \
        skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk) \
        skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
        skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
        return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
                                   const struct sk_buff *skb)
{
        return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
        if (tcp_skb_is_last(sk, skb))
                sk->sk_send_head = NULL;
        else
                sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
        if (sk->sk_send_head == skb_unlinked) {
                sk->sk_send_head = NULL;
                tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
        }
        if (tcp_sk(sk)->highest_sack == skb_unlinked)
                tcp_sk(sk)->highest_sack = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
        sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        __skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        __tcp_add_write_queue_tail(sk, skb);

        /* Queue it, remembering where we must start sending. */
        if (sk->sk_send_head == NULL) {
                sk->sk_send_head = skb;
                tcp_chrono_start(sk, TCP_CHRONO_BUSY);

                if (tcp_sk(sk)->highest_sack == NULL)
                        tcp_sk(sk)->highest_sack = skb;
        }
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
        __skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
                                                struct sk_buff *buff,
                                                struct sock *sk)
{
        __skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
                                                 struct sk_buff *skb,
                                                 struct sock *sk)
{
        __skb_queue_before(&sk->sk_write_queue, skb, new);

        if (sk->sk_send_head == skb)
                sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
        __skb_unlink(skb, &sk->sk_write_queue);
}

static inline bool tcp_write_queue_empty(struct sock *sk)
{
        return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
        if (tcp_send_head(sk)) {
                struct tcp_sock *tp = tcp_sk(sk);

                __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
        }
}

/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked_out > 0 or when the caller has ensured validity of the skb.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
        if (!tp->sacked_out)
                return tp->snd_una;

        if (tp->highest_sack == NULL)
                return tp->snd_nxt;

        return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
        tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
                                   tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
        return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
        tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
                                            struct sk_buff *old,
                                            struct sk_buff *new)
{
        if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
                tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
        switch (sk->sk_state) {
        case TCP_TIME_WAIT:
                return inet_twsk(sk)->tw_transparent;
        case TCP_NEW_SYN_RECV:
                return inet_rsk(inet_reqsk(sk))->no_srccheck;
        }
        return inet_sk(sk)->transparent;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
        return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
        TCP_SEQ_STATE_LISTENING,
        TCP_SEQ_STATE_ESTABLISHED,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
        char *name;
        sa_family_t family;
        const struct file_operations *seq_fops;
        struct seq_operations seq_ops;
};

struct tcp_iter_state {
        struct seq_net_private p;
        sa_family_t family;
        enum tcp_seq_states state;
        struct sock *syn_wait_sk;
        int bucket, offset, sbucket, num;
        loff_t last_pos;
};

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features);
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}

static inline bool tcp_stream_memory_free(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        u32 notsent_bytes = tp->write_seq - tp->snd_nxt;

        return notsent_bytes < tcp_notsent_lowat(tp);
}

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
                     const struct tcp_request_sock_ops *af_ops,
                     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *(*md5_lookup)(const struct sock *sk,
                                             const struct sock *addr_sk);
        int (*calc_md5_hash)(char *location,
                             const struct tcp_md5sig_key *md5,
                             const struct sock *sk,
                             const struct sk_buff *skb);
        int (*md5_parse)(struct sock *sk,
                         char __user *optval,
                         int optlen);
#endif
};

struct tcp_request_sock_ops {
        u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
                                                 const struct sock *addr_sk);
        int (*calc_md5_hash)(char *location,
                             const struct tcp_md5sig_key *md5,
                             const struct sock *sk,
                             const struct sk_buff *skb);
#endif
        void (*init_req)(struct request_sock *req,
                         const struct sock *sk_listener,
                         struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
        __u32 (*cookie_init_seq)(const struct sk_buff *skb,
                                 __u16 *mss);
#endif
        struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
                                       const struct request_sock *req,
                                       bool *strict);
        __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
                           struct tcp_fastopen_cookie *foc,
                           enum tcp_synack_type synack_type);
};

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
                                         const struct sock *sk, struct sk_buff *skb,
                                         __u16 *mss)
{
        tcp_synq_overflow(sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
        return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
                                         const struct sock *sk, struct sk_buff *skb,
                                         __u16 *mss)
{
        return 0;
}
#endif

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */

/* Flags to enable various loss recovery features. */
extern int sysctl_tcp_recovery;

/* Use TCP RACK to detect (lost) retransmitted packets */
#define TCP_RACK_LOST_RETRANS 0x1

extern int tcp_rack_mark_lost(struct sock *sk);

/* Record the most recently (re)sent time among the (s)acked packets */
extern void tcp_rack_advance(struct tcp_sock *tp,
                             const struct skb_mstamp *xmit_time, u8 sacked);

/*
 * Save and compile IPv4 options, return a pointer to it
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
        const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
        struct ip_options_rcu *dopt = NULL;

        if (opt->optlen) {
                int opt_size = sizeof(*dopt) + opt->optlen;

                dopt = kmalloc(opt_size, GFP_ATOMIC);
                if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
                        kfree(dopt);
                        dopt = NULL;
                }
        }
        return dopt;
}

/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c)
 * This is much faster than dissecting the packet to find out.
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
        return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
        skb->truesize = 2;
}

static inline int tcp_inq(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                answ = 0;
        } else if (sock_flag(sk, SOCK_URGINLINE) ||
                   !tp->urg_data ||
                   before(tp->urg_seq, tp->copied_seq) ||
                   !before(tp->urg_seq, tp->rcv_nxt)) {

                answ = tp->rcv_nxt - tp->copied_seq;

                /* Subtract 1, if FIN was received */
                if (answ && sock_flag(sk, SOCK_DONE))
                        answ--;
        } else {
                answ = tp->urg_seq - tp->copied_seq;
        }

        return answ;
}

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
        u16 segs_in;

        segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
        tp->segs_in += segs_in;
        if (skb->len > tcp_hdrlen(skb))
                tp->data_segs_in += segs_in;
}

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its field by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
        atomic_inc(&((struct sock *)sk)->sk_drops);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

#endif	/* _TCP_H */