/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public
 *		License as published by the Free Software Foundation; either
 *		version 2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stupid things like DECnet DNI Phase III don't deal with 32767
 * properly anyway :-)
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 MSS. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
						         * for local resources.
						         */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. Maximal error is
					 * NET clock misprediction, which is
					 * about 1/HZ.
					 */

/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* anything > 0 */
#define TCPOLEN_COOKIE_PAIR	3	/* depends on size */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
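
/*
 * Illustrative arithmetic (an added note, not original text): when
 * timestamps are in use they consume TCPOLEN_TSTAMP_ALIGNED (12) of the
 * MAX_TCP_OPTION_SPACE (40) bytes, leaving 28; a SACK option then needs
 * TCPOLEN_SACK_BASE_ALIGNED (4) plus TCPOLEN_SACK_PERBLOCK (8) per block,
 * so at most (28 - 4) / 8 = 3 SACK blocks fit in one segment.
 */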

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window */
#define TCP_INIT_CWND		10

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */
static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
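
/*
 * Illustrative sketch (an added note, not original text): sequence
 * numbers wrap modulo 2^32, so a plain '<' misorders values near the
 * wrap point while before()/after() keep working. With a = 0xfffffff0
 * and b = 0x10 (b lies 0x20 bytes past a):
 *
 *	a < b			  evaluates to 0 (wrong),
 *	before(a, b)		  evaluates to 1 (right, (__s32)(a - b) < 0),
 *	between(0xfffffff8, a, b) evaluates to 1 (a <= 0xfffffff8 <= b).
 */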

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}

	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
		return true;
	return false;
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
extern void *tcp_v4_tw_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, u8 **hvpp,
			      int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functionality prototypes
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
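
/*
 * Sketch of the arithmetic (an added explanatory note, not original
 * text): srtt is kept scaled by 8 and rttvar already carries the
 * RFC 2988 factor of four (mdev is kept scaled by 4), so this computes
 * the classic rto = SRTT + 4*MDEV in jiffies. For example, at HZ=1000 a
 * 100 ms smoothed RTT (srtt == 800) with rttvar == 50 yields an RTO of
 * 800/8 + 50 = 150 jiffies = 150 ms, later clamped by tcp_bound_rto().
 */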

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
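
/*
 * Worked example (an added note, not original text): with
 * rcv_wup = 1000 and rcv_wnd = 4000 the advertised right edge is 5000;
 * once the peer has pushed data up to rcv_nxt = 4500, only 500 bytes of
 * window remain. Should rcv_nxt ever pass the edge, the s32
 * intermediate goes negative and the window is reported as zero rather
 * than wrapping.
 */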

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/
	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
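
/*
 * Illustrative numbers (an added note, not original text): a 26280-byte
 * TSO skb built with gso_size == 1460 has tcp_skb_pcount() == 18, so
 * congestion and loss accounting treat the single queue entry as 18
 * MSS-sized segments.
 */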

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
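
/*
 * Minimal sketch (an added illustration; "tcp_toy" and its module are
 * hypothetical) of how a module plugs into the ops table above, reusing
 * the Reno helpers exported here. ssthresh and cong_avoid are the
 * required hooks:
 *
 *	static struct tcp_congestion_ops tcp_toy = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "toy",
 *	};
 *
 *	static int __init tcp_toy_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_toy);
 *	}
 *	module_init(tcp_toy_register);
 */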

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
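
/*
 * Worked example (an added note, not original text): with
 * packets_out = 10, sacked_out = 3, lost_out = 2 and retrans_out = 1,
 * tcp_left_out() is 5 and the flight size is 10 - 5 + 1 = 6 segments
 * believed to still be in the network.
 */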

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".  This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
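
/*
 * Illustrative use (an added sketch of the software-checksum transmit
 * path, not a new definition): fold the pseudo-header into a partial
 * checksum of the TCP header and payload:
 *
 *	th->check = tcp_v4_check(skb->len, inet->inet_saddr,
 *				 inet->inet_daddr,
 *				 csum_partial(th, th->doff << 2, skb->csum));
 */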

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to the VJ-style prequeue for processing in process
 * context, if a reader task is waiting. This gives early demultiplexing
 * and defers copying and checksumming to the reader; should the
 * prequeue outgrow the receive buffer, it is flushed through the
 * regular backlog receive path instead.
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
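
/*
 * Worked example (an added note, not original text): with the default
 * sysctl_tcp_adv_win_scale == 2 and space == 65536 bytes, the result is
 * 65536 - (65536 >> 2) == 49152, i.e. 3/4 of the buffer is offered as
 * window while 1/4 is reserved for application and metadata overhead;
 * a scale <= 0 instead offers space >> (-scale).
 */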

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
		     tcp_time_stamp - tp->rcv_tstamp);
}
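
/*
 * Illustrative timeline (an added note, not original text): with the
 * defaults above, the first probe goes out once keepalive_time_elapsed()
 * exceeds TCP_KEEPALIVE_TIME (two hours); up to TCP_KEEPALIVE_PROBES (9)
 * probes then follow at TCP_KEEPALIVE_INTVL (75 s) spacing before the
 * connection is declared dead. Per-socket values override the sysctls
 * when set, via the "?:" above.
 */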

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return 1;
	return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamps, and, if
	 * they do, it is recommended to ignore PAWS because "their cleanup
	 * function should take precedence over timestamps". Certainly, it
	 * is a mistake. It is necessary to understand the reasons of this
	 * constraint to relax it: if peer reboots, clock may go
	 * out-of-sync and half-open connections will not be reset.
	 * Actually, the problem would not exist if all the implementations
	 * followed the draft about maintaining clock via reboots.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb);
extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
						 struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
			     u8 newkeylen);
extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

/* - pool */
extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
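
/*
 * Illustrative sketch (an added note, not original text): a typical
 * retransmit-queue walk stops at the send head, since everything from
 * there onward has never been transmitted:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
 *			... mark for retransmission ...
 *	}
 */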

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked_out > 0 or when the caller has ensured validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num, uid;
	loff_t			last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif


/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
#endif
};

/* Using SHA1 for now, define some constants. */
#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)

extern int tcp_cookie_generator(u32 *bakery);

/**
 *	struct tcp_cookie_values - each socket needs extra space for the
 *	cookies, together with (optional) space for any SYN data.
 *
 *	A tcp_sock contains a pointer to the current value, and this is
 *	cloned to the tcp_timewait_sock.
 *
 * @cookie_pair:	variable data from the option exchange.
 *
 * @cookie_pair_size:	bytes in cookie_pair.
 *
 * @cookie_desired:	user specified tcpct_cookie_desired.  Zero
 *			indicates default (sysctl_tcp_cookie_size).
 *
 * @s_data_desired:	user specified tcpct_s_data_desired.  When
 *			@s_data_constant is set, this is the length of the
 *			constant payload; otherwise the length of variable
 *			SYN data.
 *
 * @s_data_payload:	constant data to be included in the payload of SYN
 *			or SYNACK segments when the cookie option is present.
 */
struct tcp_cookie_values {
	struct kref	kref;
	u8		cookie_pair[TCP_COOKIE_PAIR_SIZE];
	u8		cookie_pair_size;
	u8		cookie_desired;
	u16		s_data_desired:11,
			s_data_constant:1,
			s_data_in:1,
			s_data_out:1,
			s_data_unused:2;
	u8		s_data_payload[0];
};

static inline void tcp_cookie_values_release(struct kref *kref)
{
	kfree(container_of(kref, struct tcp_cookie_values, kref));
}

/* The length of constant payload data.  Note that s_data_desired is
 * overloaded, depending on s_data_constant: either the length of constant
 * data or the length of variable data.
 */
static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
	return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
		? tp->cookie_values->s_data_desired
		: 0;
}

/**
 *	struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
 *
 *	As we receive a SYN, we need temporary workspace for the cookie
 *	option values parsed from it.
 *
 * @cookie_bakery:	cookie material used when generating the reply.
 *
 * @cookie_plus:	bytes in the authenticator/cookie option, copied
 *			from the received option.
 *
 * @cookie_out_never:	never send a cookie option.
 *
 * @cookie_in_always:	a cookie option is required of the peer.
 */
struct tcp_extend_values {
	struct request_values		rv;
	u32				cookie_bakery[COOKIE_WORKSPACE_WORDS];
	u8				cookie_plus:6,
					cookie_out_never:1,
					cookie_in_always:1;
};

static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
	return (struct tcp_extend_values *)rvp;
}

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */