/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for INET connection oriented protocols.
 *
 *		Definitions for inet_connection_sock
 *
 * Authors:	Many people, see the TCP sources
 *
 *		From code originally in TCP
 */
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/request_sock.h>

/* Cancel timers, when they are not required. */
#undef INET_CSK_CLEAR_TIMERS

struct inet_bind_bucket;
struct tcp_congestion_ops;

/*
 * Pointers to address related TCP functions
 * (i.e. things that depend on the address family)
 */
struct inet_connection_sock_af_ops {
	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
	int	    (*rebuild_header)(struct sock *sk);
	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req,
				      struct dst_entry *dst,
				      struct request_sock *req_unhash,
				      bool *own_req);
	u16	    net_header_len;
	u16	    net_frag_header_len;
	u16	    sockaddr_len;
	int	    (*setsockopt)(struct sock *sk, int level, int optname,
				  sockptr_t optval, unsigned int optlen);
	int	    (*getsockopt)(struct sock *sk, int level, int optname,
				  char __user *optval, int __user *optlen);
	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
	void	    (*mtu_reduced)(struct sock *sk);
};
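
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a connection oriented protocol typically provides one
 * inet_connection_sock_af_ops table per address family and points
 * icsk_af_ops at it when the socket is set up.  The member names are real;
 * the "example_*" handlers and table name are hypothetical placeholders.
 *
 *	static const struct inet_connection_sock_af_ops example_v4_af_ops = {
 *		.queue_xmit	= example_queue_xmit,
 *		.send_check	= example_send_check,
 *		.rebuild_header	= example_rebuild_header,
 *		.conn_request	= example_conn_request,
 *		.syn_recv_sock	= example_syn_recv_sock,
 *		.net_header_len	= sizeof(struct iphdr),
 *		.sockaddr_len	= sizeof(struct sockaddr_in),
 *	};
 *
 *	inet_csk(sk)->icsk_af_ops = &example_v4_af_ops;
 */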

/** inet_connection_sock - INET connection oriented sock
 *
 * @icsk_accept_queue:	   FIFO of established children
 * @icsk_bind_hash:	   Bind node
 * @icsk_timeout:	   Timeout
 * @icsk_retransmit_timer: Resend (no ack)
 * @icsk_delack_timer:	   Delayed ACK timer
 * @icsk_rto:		   Retransmit timeout
 * @icsk_rto_min:	   Minimum bound for the retransmit timeout
 * @icsk_delack_max:	   Maximum delayed ACK timeout
 * @icsk_pmtu_cookie:	   Last pmtu seen by socket
 * @icsk_ca_ops:	   Pluggable congestion control hook
 * @icsk_af_ops:	   Operations which are AF_INET{4,6} specific
 * @icsk_ulp_ops:	   Pluggable ULP control hook
 * @icsk_ulp_data:	   ULP private data
 * @icsk_clean_acked:	   Clean acked data hook
 * @icsk_listen_portaddr_node:	hash to the portaddr listener hashtable
 * @icsk_sync_mss:	   Hook to re-sync MSS after a PMTU change
 * @icsk_ca_state:	   Congestion control state
 * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
 * @icsk_pending:	   Scheduled timer event
 * @icsk_backoff:	   Backoff
 * @icsk_syn_retries:	   Number of allowed SYN (or equivalent) retries
 * @icsk_probes_out:	   unanswered 0 window probes
 * @icsk_ext_hdr_len:	   Network protocol overhead (IP/IPv6 options)
 * @icsk_ack:		   Delayed ACK control data
 * @icsk_mtup:		   MTU probing control data
 * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
 * @icsk_user_timeout:	   TCP_USER_TIMEOUT value
 */
struct inet_connection_sock {
	/* inet_sock has to be the first member! */
	struct inet_sock	  icsk_inet;
	struct request_sock_queue icsk_accept_queue;
	struct inet_bind_bucket	  *icsk_bind_hash;
	unsigned long		  icsk_timeout;
	struct timer_list	  icsk_retransmit_timer;
	struct timer_list	  icsk_delack_timer;
	__u32			  icsk_rto;
	__u32			  icsk_rto_min;
	__u32			  icsk_delack_max;
	__u32			  icsk_pmtu_cookie;
	const struct tcp_congestion_ops *icsk_ca_ops;
	const struct inet_connection_sock_af_ops *icsk_af_ops;
	const struct tcp_ulp_ops  *icsk_ulp_ops;
	void __rcu		  *icsk_ulp_data;
	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
	struct hlist_node	  icsk_listen_portaddr_node;
	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
	__u8			  icsk_ca_state:5,
				  icsk_ca_initialized:1,
				  icsk_ca_setsockopt:1,
				  icsk_ca_dst_locked:1;
	__u8			  icsk_retransmits;
	__u8			  icsk_pending;
	__u8			  icsk_backoff;
	__u8			  icsk_syn_retries;
	__u8			  icsk_probes_out;
	__u16			  icsk_ext_hdr_len;
	struct {
		__u8		  pending;	 /* ACK is pending			   */
		__u8		  quick;	 /* Scheduled number of quick acks	   */
		__u8		  pingpong;	 /* The session is interactive		   */
		__u8		  retry;	 /* Number of attempts			   */
		__u32		  ato;		 /* Predicted tick of soft clock	   */
		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
		__u32		  lrcvtime;	 /* timestamp of last received data packet */
		__u16		  last_seg_size; /* Size of last incoming segment	   */
		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */
	} icsk_ack;
	struct {
		/* Range of MTUs to search */
		int		  search_high;
		int		  search_low;

		/* Information on the current probe. */
		u32		  probe_size:31,
		/* Is the MTUP feature enabled for this connection? */
				  enabled:1;

		u32		  probe_timestamp;
	} icsk_mtup;
	u32			  icsk_probes_tstamp;
	u32			  icsk_user_timeout;

	u64			  icsk_ca_priv[104 / sizeof(u64)];
#define ICSK_CA_PRIV_SIZE	sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};

#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
#define ICSK_TIME_DACK		2	/* Delayed ack timer */
#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
#define ICSK_TIME_LOSS_PROBE	5	/* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT	6	/* Reordering timer */

static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}
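
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a congestion control module keeps its per-connection state in
 * icsk_ca_priv and reaches it through inet_csk_ca().  The private struct
 * must not exceed ICSK_CA_PRIV_SIZE bytes.  "struct example_ca" and
 * example_ca_init() are hypothetical placeholders.
 *
 *	struct example_ca {
 *		u32 cnt;
 *		u32 last_max_cwnd;
 *	};
 *
 *	static void example_ca_init(struct sock *sk)
 *	{
 *		struct example_ca *ca = inet_csk_ca(sk);
 *
 *		BUILD_BUG_ON(sizeof(struct example_ca) > ICSK_CA_PRIV_SIZE);
 *		ca->cnt = 0;
 *		ca->last_max_cwnd = 0;
 *	}
 */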

struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority);

enum inet_csk_ack_state_t {
	ICSK_ACK_SCHED	= 1,
	ICSK_ACK_TIMER	= 2,
	ICSK_ACK_PUSHED = 4,
	ICSK_ACK_PUSHED2 = 8,
	ICSK_ACK_NOW = 16
};

void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *),
			       void (*delack_handler)(struct timer_list *),
			       void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);

static inline void inet_csk_schedule_ack(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}

static inline int inet_csk_ack_scheduled(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}

static inline void inet_csk_delack_init(struct sock *sk)
{
	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}

void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);

static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
		icsk->icsk_pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending = 0;
		icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}

/*
 *	Reset the retransmission timer
 */
static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
					     unsigned long when,
					     const unsigned long max_when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (when > max_when) {
		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
			 sk, what, when, (void *)_THIS_IP_);
		when = max_when;
	}

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
	    what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
		icsk->icsk_pending = what;
		icsk->icsk_timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
		icsk->icsk_ack.timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}
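
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * TCP-style callers arm the retransmit timer with the current RTO, clamped
 * by an upper bound (TCP_RTO_MAX from <net/tcp.h>); "when" is a relative
 * delay in jiffies:
 *
 *	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 *				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 *
 * A delayed ACK is scheduled the same way with ICSK_TIME_DACK, which sets
 * ICSK_ACK_TIMER in icsk_ack.pending instead of icsk_pending.
 */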

static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
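
/*
 * Example (editor's addition, illustrative): with icsk_rto equal to 200 ms
 * worth of jiffies and icsk_backoff = 3, the next timeout is
 * 200 ms << 3 = 1600 ms, unless that exceeds max_when, in which case
 * max_when is used.  The shift is done in 64 bits so a large backoff
 * cannot overflow before the clamp is applied.
 */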

struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req);

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req,
					 bool own_req);

static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{
	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
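
/*
 * Note (editor's addition, illustrative): a listener typically consults
 * this when a new SYN arrives; once the accept queue has reached
 * sk_max_ack_backlog it will prefer dropping the request or falling back
 * to syncookies rather than queueing another request_sock, roughly:
 *
 *	if (inet_csk_reqsk_queue_is_full(sk))
 *		goto drop;
 */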

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
}

void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);

/*
 * LISTEN is a special case for poll..
 */
static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
			(EPOLLIN | EPOLLRDNORM) : 0;
}

int inet_csk_listen_start(struct sock *sk, int backlog);
void inet_csk_listen_stop(struct sock *sk);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);

/* update the fast reuse flag when adding a socket */
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk);

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);

#define TCP_PINGPONG_THRESH	3

static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
}

static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = 0;
}

static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}

static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.pingpong < U8_MAX)
		icsk->icsk_ack.pingpong++;
}
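
/*
 * Note (editor's addition, illustrative): pingpong mode marks the
 * connection as interactive request/response traffic.
 * inet_csk_enter_pingpong_mode() jumps the counter straight to
 * TCP_PINGPONG_THRESH, while callers that only suspect interactivity bump
 * it with inet_csk_inc_pingpong_cnt(); the connection is reported as being
 * in pingpong mode once the counter has reached the threshold.
 */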

static inline bool inet_csk_has_ulp(struct sock *sk)
{
	return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
}

#endif /* _INET_CONNECTION_SOCK_H */