/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stupid things have it fixed at 32767, which is not smart enough
 * to handle super huge windows.
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing. */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer. */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to push VJ trick.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for fast timestamp echo check. */

/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */
static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
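
/*
 * Example (illustrative): the sequence space wraps, so with
 * seq1 = 0xfffffff0 and seq2 = 0x10, before(seq1, seq2) is true:
 * (__s32)(seq1 - seq2) == (__s32)0xffffffe0 < 0.
 */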

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}
extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);

extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define TCP_ECN_OK		1
#define TCP_ECN_QUEUE_CWR	2
#define TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				       struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
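
/*
 * Example (illustrative): with tp->max_window == 10000, a 6000 byte
 * packet exceeds half the window and is bounded to 5000; anything at
 * or below 5000 is returned unchanged.
 */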

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* tcp_header_len is in bytes; shifting it left by 26 places the
	 * length in 32-bit words (len / 4) into the doff position
	 * (bits 28-31) of the prediction flags.
	 */
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer push more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
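
/*
 * Example (illustrative): if the last window update was sent at
 * rcv_wup == 1000 with rcv_wnd == 5000, and we have since received up
 * to rcv_nxt == 4000, the remaining advertised window is 2000 bytes.
 */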

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
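
/*
 * Example (illustrative): code walking the write queue reads the
 * per-packet state through the cast above, e.g.
 *
 *	u32 seq = TCP_SKB_CB(skb)->seq;
 *	u32 len = TCP_SKB_CB(skb)->end_seq - seq;  (counts SYN/FIN too)
 */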

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery due to timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
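
/*
 * Illustrative sketch (hypothetical module, not part of this header):
 * a minimal congestion control only needs the required hooks and can
 * reuse the Reno helpers declared above.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 * Its module init would then call
 * tcp_register_congestion_control(&tcp_example).
 */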

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * packets_out / retrans_out for window limiting.
 *
 *	left_out = sacked_out + lost_out
 *	packets_in_flight = packets_out - left_out + retrans_out
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
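
/*
 * Example (illustrative): packets_out == 10, sacked_out == 2,
 * lost_out == 1, retrans_out == 1 gives 10 - (2 + 1) + 1 == 8 packets
 * still believed to be in the network.
 */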

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to the VJ-style prequeue for processing in process
 * context, if a reader task is waiting.  The queue is drained either
 * by the reader or, if it grows past sk_rcvbuf, synchronously here.
 * Returns 1 if the packet was queued (caller must stop processing it),
 * 0 if the caller should take the normal receive path.
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
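
/*
 * Example (illustrative): with sysctl_tcp_adv_win_scale == 2,
 * tcp_win_from_space(65536) == 65536 - (65536 >> 2) == 49152, i.e. a
 * quarter of the buffer is reserved as application/metadata overhead.
 */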

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

/* The "x ? : y" form below is a GNU extension: use the per-socket value
 * when it is non-zero, else fall back to the sysctl default.
 */
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
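
/*
 * Note (illustrative): (rto << 2) - (rto >> 1) == 3.5 * rto, so the
 * FIN_WAIT2 timeout is never allowed below three and a half RTOs.
 */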

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would be not existing if all
	   the implementations followed draft about maintaining clock
	   via reboots. Linux-2.6.* works around this bug actually,
	   and it is safest solution.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
			     u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
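
/*
 * Illustrative usage of the pool accessors above: tcp_get_md5sig_pool()
 * pins the caller to a CPU (get_cpu()), so every successful get must be
 * paired with tcp_put_md5sig_pool().
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (hp) {
 *		... tcp_md5_hash_header() / tcp_md5_hash_key() ...
 *		tcp_put_md5sig_pool();
 *	}
 */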

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

/* Determine how long the retransmissions have been going on: returns
 * true once the time elapsed since the first retransmission exceeds
 * what @boundary exponentially backed-off RTOs, starting from
 * TCP_RTO_MIN and capped at TCP_RTO_MAX, would have taken.
 */
static inline bool retransmits_timed_out(const struct sock *sk,
					 unsigned int boundary)
{
	unsigned int timeout, linear_backoff_thresh;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);

	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;

	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}
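
/*
 * Example (illustrative): with TCP_RTO_MIN == HZ/5 (200ms) and
 * boundary == 3, timeout = ((2 << 3) - 1) * TCP_RTO_MIN
 * = 15 * 200ms = 3s, i.e. the sum of the backed-off series
 * 200 + 400 + 800 + 1600 ms.
 */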

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remember it */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}
1356
1357static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1358{
1359 __skb_unlink(skb, &sk->sk_write_queue);
1360}
1361
1362static inline int tcp_write_queue_empty(struct sock *sk)
1363{
1364 return skb_queue_empty(&sk->sk_write_queue);
1365}

/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked_out > 0 or when the caller has ensured validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
#endif
};

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */