/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node holds long-living
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference count drops to zero.
 *  Once that has happened, a node may be reclaimed after a sufficient
 *  amount of time has passed since its last use.  Less-recently-used
 *  entries may also be evicted early when the pool is overloaded, i.e.
 *  when the total number of entries reaches inet_peer_threshold.
 *
 *  The node pool is organised as an RB tree keyed by the peer address.
 *  A tree (rather than a hash table) makes it hard to mount an easy and
 *  efficient DoS attack via hash collisions: a huge number of long-living
 *  nodes in a single hash slot would significantly delay lookups performed
 *  with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with base->lock held for writing.
 *  2.  Nodes may disappear from the tree only with base->lock held for
 *      writing AND their reference count about to drop to zero.
 *  3.  base->total is modified under base->lock.
 *  4.  struct inet_peer field modification:
 *		rb_node: base->lock
 *		refcnt: atomically against modifications on other CPUs;
 *			usually under some other lock to prevent the node
 *			from disappearing
 *		daddr: unchangeable
 */
static struct kmem_cache *peer_cachep __read_mostly;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

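/* Usage sketch (illustrative, not part of the kernel sources): a subsystem
 * embeds a base in its per-namespace state and initialises it once during
 * namespace setup.  The structure and function names below are assumptions
 * made for the example.
 *
 *	struct my_net_state {
 *		struct inet_peer_base peers;
 *	};
 *
 *	static int __net_init my_net_init(struct my_net_state *st)
 *	{
 *		inet_peer_base_init(&st->peers);
 *		return 0;
 *	}
 */
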
#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this point
							 */

int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* Shrink the threshold on machines with little memory so the
	 * pool cannot grow unreasonably large.
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

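/* Worked example for the scaling above: the tests cascade, so a 16 MB
 * machine passes the first two and the threshold becomes
 * (65536 + 128) >> 2 = 16416 entries; an 8 MB machine passes all three,
 * giving (65536 + 128) >> 4 = 4104 entries.
 */
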
/* Called with rcu_read_lock() or base->lock held. */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			/* Found: take a reference unless the entry is
			 * already being freed.
			 */
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			/* Locked caller: remember entries visited on the
			 * way down as garbage-collection candidates.
			 */
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			/* Lockless caller: a writer changed the tree under
			 * us, so this descent can no longer be trusted.
			 */
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	/* Scale the permitted idle time linearly between inet_peer_maxttl
	 * for an empty pool and roughly inet_peer_minttl for a nearly full
	 * one; at or over the threshold, reclaim everything we can.
	 */
	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		delta = (__u32)jiffies - p->dtime;
		/* Keep entries that are still fresh or still referenced:
		 * refcount_dec_if_one() only succeeds when the tree holds
		 * the last reference.
		 */
		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

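/* Worked example of the ttl interpolation above, using the default
 * tunables (minttl = 120 * HZ, maxttl = 600 * HZ, threshold = 65664):
 * an empty pool allows 600*HZ (ten minutes) of idle time, a half-full
 * pool 600*HZ - 480 * 32832 / 65664 * HZ = 360*HZ (six minutes), and a
 * pool just below the threshold about 121*HZ; at or above the threshold
 * the ttl is forced to zero.
 */
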
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* retry an exact lookup, taking the lock before.
	 * At least, nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;	/* initialise last use, so gc never reads garbage */
			refcount_set(&p->refcnt, 2);	/* one for the tree, one for the caller */
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			/* 60*HZ is arbitrary, but chosen enough high so that the first
			 * calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	/* Record the time of last use; inet_peer_gc() compares it
	 * against the pool-pressure-dependent ttl.
	 */
	p->dtime = (__u32)jiffies;

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

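/* Usage sketch (illustrative, not part of the kernel sources): callers
 * typically pair inet_getpeer() with inet_putpeer() around a short
 * critical section, e.g. to consult per-peer rate-limiting state.  The
 * variables base, daddr and timeout are assumed to exist in the caller.
 *
 *	struct inet_peer *peer;
 *	bool allow = true;
 *
 *	peer = inet_getpeer(base, &daddr, 1);	// create if missing
 *	if (peer) {
 *		allow = inet_peer_xrlim_allow(peer, timeout);
 *		inet_putpeer(peer);
 *	}
 */
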
/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	(When the message includes a destination address, but it hasn't
 *	been checked against the limit yet.)
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit all sorts of messages,
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	/* Token bucket: tokens accrue one per jiffy since the last call,
	 * capped at XRLIM_BURST_FACTOR * timeout; sending one message
	 * costs 'timeout' tokens.
	 */
	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

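/* Example behaviour of inet_peer_xrlim_allow() above: with timeout = HZ,
 * an idle peer starts with a full bucket of 6*HZ tokens, so a burst of
 * six messages is allowed back to back; after that, tokens refill at one
 * per jiffy, sustaining roughly one message per second.
 */
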
/* Empty the whole tree, dropping the tree's reference on every entry. */
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);