1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _INET_TIMEWAIT_SOCK_
16#define _INET_TIMEWAIT_SOCK_
17
18
19#include <linux/kmemcheck.h>
20#include <linux/list.h>
21#include <linux/module.h>
22#include <linux/timer.h>
23#include <linux/types.h>
24#include <linux/workqueue.h>
25
26#include <net/inet_sock.h>
27#include <net/sock.h>
28#include <net/tcp_states.h>
29#include <net/timewait_sock.h>
30
31#include <asm/atomic.h>
32
struct inet_hashinfo;

/* TIME_WAIT recycling calendar: 2^5 = 32 slots. */
#define INET_TWDR_RECYCLE_SLOTS_LOG 5
#define INET_TWDR_RECYCLE_SLOTS (1 << INET_TWDR_RECYCLE_SLOTS_LOG)

/*
 * Calendar tick selection.  Each arm reduces to
 * ceil(log2(HZ)) + 2 - INET_TWDR_RECYCLE_SLOTS_LOG, so one calendar
 * slot spans 2^INET_TWDR_RECYCLE_TICK jiffies -- roughly 1/8 second
 * per slot, i.e. about 4 seconds across the whole 32-slot wheel,
 * independent of HZ.  HZ outside (16, 4096] is rejected at compile
 * time because the formula would go out of range.
 */
#if HZ <= 16 || HZ > 4096
# error Unsupported: HZ <= 16 or HZ > 4096
#elif HZ <= 32
# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#elif HZ <= 64
# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#elif HZ <= 128
# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#elif HZ <= 256
# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#elif HZ <= 512
# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#elif HZ <= 1024
# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#elif HZ <= 2048
# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#else
# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#endif

/* Number of hash cells the slow death-row wheel is spread over. */
#define INET_TWDR_TWKILL_SLOTS 8

/*
 * NOTE(review): presumably the max number of timewait sockets reaped
 * per twkill_work pass before rescheduling -- confirm against
 * inet_twdr_twkill_work() in net/ipv4/inet_timewait_sock.c.
 */
#define INET_TWDR_TWKILL_QUOTA 100
66
/*
 * Per-protocol book-keeping for sockets sitting in TIME_WAIT
 * ("death row").  Two expiry mechanisms share this structure:
 * the twcal_* calendar wheel (INET_TWDR_RECYCLE_SLOTS slots, one
 * tick = 2^INET_TWDR_RECYCLE_TICK jiffies), and a slower wheel of
 * INET_TWDR_TWKILL_SLOTS cells driven by tw_timer, with overflow
 * drained from process context through twkill_work.  The precise
 * hand-off between the two lives in net/ipv4/inet_timewait_sock.c.
 */
struct inet_timewait_death_row {
	/* Calendar-wheel ("recycle") state, armed by twcal_timer. */
	int			twcal_hand;	/* current calendar slot index */
	unsigned long		twcal_jiffie;	/* jiffies reference for the hand */
	struct timer_list	twcal_timer;
	struct hlist_head	twcal_row[INET_TWDR_RECYCLE_SLOTS];

	spinlock_t		death_lock;	/* NOTE(review): presumably guards all fields here -- confirm */
	int			tw_count;	/* timewait sockets currently queued */
	int			period;		/* tw_timer rearm period, in jiffies */
	u32			thread_slots;	/* bitmask of cells[] still owed to twkill_work -- TODO confirm */
	struct work_struct	twkill_work;	/* process-context reaper, see inet_twdr_twkill_work() */
	struct timer_list	tw_timer;	/* slow-wheel timer, see inet_twdr_hangman() */
	int			slot;		/* slow-wheel hand: next cells[] index to reap */
	struct hlist_head	cells[INET_TWDR_TWKILL_SLOTS];
	struct inet_hashinfo	*hashinfo;	/* hash tables the minisocks live in */
	int			sysctl_tw_recycle;	/* backing store for the tw_recycle sysctl */
	int			sysctl_max_tw_buckets;	/* cap on tw_count, sysctl-controlled */
};
86
87extern void inet_twdr_hangman(unsigned long data);
88extern void inet_twdr_twkill_work(struct work_struct *work);
89extern void inet_twdr_twcal_tick(unsigned long data);
90
/*
 * Alignment forced on the tw_daddr/tw_rcv_saddr pair in
 * struct inet_timewait_sock below -- word-sized so that, presumably,
 * the demux can compare both addresses with a single aligned load on
 * 64-bit (TODO confirm against the INET_MATCH users).
 */
#if (BITS_PER_LONG == 64)
#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
#else
#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
#endif
96
97struct inet_bind_bucket;
98
99
100
101
102
103
/*
 * Cut-down socket kept in place of a full struct sock while a
 * connection finishes TIME_WAIT.  It MUST start with sock_common:
 * hash-table code casts struct sock and struct inet_timewait_sock
 * to each other (see inet_twsk() below), and the tw_* #defines
 * alias the shared skc_* fields so both types present the same
 * demux interface.  Do not add members before __tw_common.
 */
struct inet_timewait_sock {
	struct sock_common	__tw_common;
#define tw_family		__tw_common.skc_family
#define tw_state		__tw_common.skc_state
#define tw_reuse		__tw_common.skc_reuse
#define tw_bound_dev_if		__tw_common.skc_bound_dev_if
#define tw_node			__tw_common.skc_nulls_node
#define tw_bind_node		__tw_common.skc_bind_node
#define tw_refcnt		__tw_common.skc_refcnt
#define tw_hash			__tw_common.skc_hash
#define tw_prot			__tw_common.skc_prot
#define tw_net			__tw_common.skc_net
	int			tw_timeout;	/* timewait duration, in jiffies */
	/* NOTE(review): presumably distinguishes true TIME_WAIT from
	 * FIN_WAIT2 handled by the timewait code -- confirm in tcp_minisocks.c. */
	volatile unsigned char	tw_substate;
	unsigned char		tw_rcv_wscale;	/* negotiated receive window scale */

	/* Socket demultiplex comparisons on incoming packets; the
	 * alignment attribute pairs tw_daddr with tw_rcv_saddr (see
	 * INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES above). */
	__be16			tw_sport;
	__be32			tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
	__be32			tw_rcv_saddr;
	__be16			tw_dport;
	__u16			tw_num;		/* local port, host byte order */
	kmemcheck_bitfield_begin(flags);
	/* And these are ours. */
	unsigned int		tw_ipv6only : 1,
				tw_transparent : 1,	/* carried over from inet_sock transparent flag */
				tw_pad : 14,		/* 14 bits hole */
				tw_ipv6_offset : 16;	/* offset to the IPv6 part -- TODO confirm against tcp_twsk_ipv6only() */
	kmemcheck_bitfield_end(flags);
	unsigned long		tw_ttd;		/* "time to die", in jiffies */
	struct inet_bind_bucket	*tw_tb;		/* bind bucket holding the local port */
	struct hlist_node	tw_death_node;	/* link on a death-row chain (cells[]/twcal_row[]) */
};
142
/*
 * Link tw at the head of a nulls-terminated hash chain.  Uses the
 * RCU-safe insertion primitive, so concurrent lockless readers may
 * observe the new entry immediately.
 */
static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
					  struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&tw->tw_node, list);
}
148
/*
 * Link tw onto a bind-hash chain (plain hlist -- callers provide
 * whatever locking the bind hash requires).
 */
static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
					   struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind_node, list);
}
154
155static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
156{
157 return !hlist_unhashed(&tw->tw_death_node);
158}
159
/*
 * Mark tw as not queued on any death row.  Clearing pprev is the
 * "unhashed" state that hlist_unhashed() keys off, so
 * inet_twsk_dead_hashed() returns 0 afterwards.
 */
static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
{
	tw->tw_death_node.pprev = NULL;
}
164
/*
 * Unconditionally unlink tw from its death-row chain and reset the
 * node to the unhashed state.  Caller must know tw is hashed --
 * NOTE(review): presumably called under death_lock; confirm at the
 * call sites in net/ipv4/inet_timewait_sock.c.
 */
static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
{
	__hlist_del(&tw->tw_death_node);
	inet_twsk_dead_node_init(tw);
}
170
/*
 * Remove tw from its death-row chain if it is on one.
 * Returns 1 if it was removed, 0 if it was not queued.
 */
static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
{
	if (!inet_twsk_dead_hashed(tw))
		return 0;

	__inet_twsk_del_dead_node(tw);
	return 1;
}
179
/* Walk a timewait hash chain (nulls-terminated list, see tw_node). */
#define inet_twsk_for_each(tw, node, head) \
	hlist_nulls_for_each_entry(tw, node, head, tw_node)

/* Walk the "inmates" of one death-row cell. */
#define inet_twsk_for_each_inmate(tw, node, jail) \
	hlist_for_each_entry(tw, node, jail, tw_death_node)

/* As above, but safe against removal of the current entry. */
#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
	hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
188
/*
 * Downcast a hash-table entry to its timewait form.  Legal only
 * because both structs begin with sock_common; the caller must have
 * established (e.g. via sk_state) that sk really is a timewait sock.
 */
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
	return (struct inet_timewait_sock *)sk;
}
193
194static inline __be32 inet_rcv_saddr(const struct sock *sk)
195{
196 return likely(sk->sk_state != TCP_TIME_WAIT) ?
197 inet_sk(sk)->rcv_saddr : inet_twsk(sk)->tw_rcv_saddr;
198}
199
200extern void inet_twsk_put(struct inet_timewait_sock *tw);
201
202extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
203 const int state);
204
205extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
206 struct sock *sk,
207 struct inet_hashinfo *hashinfo);
208
209extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
210 struct inet_timewait_death_row *twdr,
211 const int timeo, const int timewait_len);
212extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
213 struct inet_timewait_death_row *twdr);
214
215extern void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
216 struct inet_timewait_death_row *twdr, int family);
217
/*
 * Network namespace owning this timewait sock.  With namespaces
 * compiled out there is only init_net, so no per-socket pointer is
 * needed (tw_net aliases __tw_common.skc_net).
 */
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
#ifdef CONFIG_NET_NS
	return twsk->tw_net;
#else
	return &init_net;
#endif
}
227
/*
 * Record the owning network namespace.  Deliberately a no-op when
 * CONFIG_NET_NS is off: every socket then implicitly belongs to
 * init_net and twsk_net() returns it directly.
 */
static inline
void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
{
#ifdef CONFIG_NET_NS
	twsk->tw_net = net;
#endif
}
235#endif
236