/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain long-living
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that happens, a node may be removed after a sufficient amount of
 *  time has passed since its last use.  Less-recently-used entries may
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  Node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  The per-base "total" counter is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_height: pool lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	daddr: unchangeable
 */
static struct kmem_cache *peer_cachep __read_mostly;

static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;	/* delay before a gc rescan */
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);

#define node_height(x) x->avl_height

/* The fake node stands in for missing children, so the height of an
 * absent subtree reads as 0 and no NULL checks are needed.
 */
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->root = peer_avl_empty_rcu;
	seqlock_init(&bp->lock);
	bp->flush_seq = ~0U;
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);
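
/* Illustrative sketch (hypothetical, not from this file): a user of this
 * API embeds a base in its own state and initialises it once, e.g.
 *
 *	struct my_peer_state {			// hypothetical container
 *		struct inet_peer_base	base;
 *	};
 *
 *	static void my_peer_state_init(struct my_peer_state *s)
 *	{
 *		inet_peer_base_init(&s->base);
 *	}
 *
 * The kernel's own per-family bases are set up elsewhere; this only shows
 * the call.
 */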

/* Per-family generation counters, used to flush all bases of a family
 * lazily; see flush_check() below.
 */
static atomic_t v4_seq = ATOMIC_INIT(0);
static atomic_t v6_seq = ATOMIC_INIT(0);

static atomic_t *inetpeer_seq_ptr(int family)
{
	return (family == AF_INET ? &v4_seq : &v6_seq);
}

static inline void flush_check(struct inet_peer_base *base, int family)
{
	atomic_t *fp = inetpeer_seq_ptr(family);

	if (unlikely(base->flush_seq != atomic_read(fp))) {
		inetpeer_invalidate_tree(base);
		base->flush_seq = atomic_read(fp);
	}
}

void inetpeer_invalidate_family(int family)
{
	atomic_t *fp = inetpeer_seq_ptr(family);

	atomic_inc(fp);
}
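
/* Flow sketch: invalidation is deferred to the next lookup.  A writer does
 *
 *	inetpeer_invalidate_family(AF_INET);	// bump v4_seq only
 *
 * and the next inet_getpeer() on any AF_INET base finds
 * base->flush_seq != atomic_read(&v4_seq) in flush_check(), flushes that
 * base via inetpeer_invalidate_tree(), and records the new generation.
 */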

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */

int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
static void inetpeer_gc_worker(struct work_struct *work)
{
	struct inet_peer *p, *n, *c;
	LIST_HEAD(list);

	spin_lock_bh(&gc_lock);
	list_replace_init(&gc_list, &list);
	spin_unlock_bh(&gc_lock);

	if (list_empty(&list))
		return;

	list_for_each_entry_safe(p, n, &list, gc_list) {

		if (need_resched())
			cond_resched();

		/* Flatten the tree: push both children onto the work list
		 * before deciding the fate of this node.
		 */
		c = rcu_dereference_protected(p->avl_left, 1);
		if (c != peer_avl_empty) {
			list_add_tail(&c->gc_list, &list);
			p->avl_left = peer_avl_empty_rcu;
		}

		c = rcu_dereference_protected(p->avl_right, 1);
		if (c != peer_avl_empty) {
			list_add_tail(&c->gc_list, &list);
			p->avl_right = peer_avl_empty_rcu;
		}

		/* Re-read the next entry: the children we just queued may
		 * have landed right after p.
		 */
		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

		if (!atomic_read(&p->refcnt)) {
			list_del(&p->gc_list);
			kmem_cache_free(peer_cachep, p);
		}
	}

	if (list_empty(&list))
		return;

	/* Some entries are still referenced; put them back and retry later. */
	spin_lock_bh(&gc_lock);
	list_splice(&list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the storage pool depending on the total amount of memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
}
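
/* Worked example of the scaling above (assuming 4KB pages): with 16MB of
 * RAM, totalram is 4096 pages, so the first two tests match and the
 * threshold is halved twice: (65536 + 128) >> 2 = 16416 entries.
 */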

static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->addr.a6[i] == b->addr.a6[i])
			continue;
		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
			return -1;
		return 1;
	}

	return 0;
}
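
/* Note on the order: the words are __be32 values force-cast to u32, so on
 * little-endian machines this is not numeric address order; the AVL tree
 * only needs a consistent total order.  Illustrative check:
 *
 *	struct inetpeer_addr a = { .family = AF_INET };
 *	a.addr.a4 = htonl(0x0a000001);		// 10.0.0.1
 *	// addr_compare(&a, &a) == 0; distinct addresses yield -1 or 1
 */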

#define rcu_deref_locked(X, BASE)				\
	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_deref_locked(_base->root, _base);		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, _base);		\
	}							\
	u;							\
})
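
/* How the stack is used: lookup() stores the address of every __rcu slot
 * it walks through, root first:
 *
 *	stack[0] = &base->root;		// slot of the root
 *	stack[1] = &node->avl_left;	// slot taken at each comparison
 *	...
 *
 * On a hit the top of the stack points at the slot holding the match; on a
 * miss it points at the empty slot where link_to_pool() will store the new
 * node, and peer_avl_rebalance() replays the recorded path bottom-up, which
 * is why no parent pointers are needed.
 */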

/*
 * Called with rcu_read_lock()
 * Because we hold no lock against a writer, its quite possible we fall
 * in an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if number of links exceeds PEER_MAXDEPTH
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
				    struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted (refcnt=-1)
			 */
			if (!atomic_add_unless(&u->refcnt, 1, -1))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference(u->avl_left);
		else
			u = rcu_dereference(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}
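
/* Sketch of the "deleted" convention relied on above: inet_peer_gc() marks
 * a victim with atomic_cmpxchg(&p->refcnt, 0, -1), so
 *
 *	atomic_add_unless(&u->refcnt, 1, -1)
 *
 * fails exactly when u is such a victim, and the lockless lookup reports a
 * miss rather than resurrecting a node already queued for freeing.
 */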

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_deref_locked(*v, base);			\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, base);			\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * The stack holds the path from the root down to (and including) the slot
 * of the node that changed; heights are fixed up bottom-up, rotating where
 * a subtree got more than one level higher than its sibling.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_deref_locked(*nodep, base);
		l = rcu_deref_locked(node->avl_left, base);
		r = rcu_deref_locked(node->avl_right, base);
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* left subtree too high */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_deref_locked(l->avl_left, base);
			lr = rcu_deref_locked(l->avl_right, base);
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) { /* single rotation: l becomes root */
				RCU_INIT_POINTER(node->avl_left, lr);
				RCU_INIT_POINTER(node->avl_right, r);
				node->avl_height = lrh + 1;
				RCU_INIT_POINTER(l->avl_left, ll);
				RCU_INIT_POINTER(l->avl_right, node);
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* double rotation: lr becomes root */
				lrl = rcu_deref_locked(lr->avl_left, base);
				lrr = rcu_deref_locked(lr->avl_right, base);
				RCU_INIT_POINTER(node->avl_left, lrr);
				RCU_INIT_POINTER(node->avl_right, r);
				node->avl_height = rh + 1;
				RCU_INIT_POINTER(l->avl_left, ll);
				RCU_INIT_POINTER(l->avl_right, lrl);
				l->avl_height = rh + 1;
				RCU_INIT_POINTER(lr->avl_left, l);
				RCU_INIT_POINTER(lr->avl_right, node);
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* right subtree too high */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_deref_locked(r->avl_right, base);
			rl = rcu_deref_locked(r->avl_left, base);
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) { /* single rotation: r becomes root */
				RCU_INIT_POINTER(node->avl_right, rl);
				RCU_INIT_POINTER(node->avl_left, l);
				node->avl_height = rlh + 1;
				RCU_INIT_POINTER(r->avl_right, rr);
				RCU_INIT_POINTER(r->avl_left, node);
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* double rotation: rl becomes root */
				rlr = rcu_deref_locked(rl->avl_right, base);
				rll = rcu_deref_locked(rl->avl_left, base);
				RCU_INIT_POINTER(node->avl_right, rll);
				RCU_INIT_POINTER(node->avl_left, l);
				node->avl_height = lh + 1;
				RCU_INIT_POINTER(r->avl_right, rr);
				RCU_INIT_POINTER(r->avl_left, rlr);
				r->avl_height = lh + 1;
				RCU_INIT_POINTER(rl->avl_right, r);
				RCU_INIT_POINTER(rl->avl_left, node);
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else { /* balanced: just fix the height */
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}
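
/* Shape of the two left-heavy cases above (the right-heavy branch mirrors
 * them):
 *
 *	single rotation (lrh <= llh)	double rotation (lrh > llh)
 *
 *	    node          l		    node             lr
 *	    /  \         / \		    /  \            /  \
 *	   l    r  =>  ll   node	   l    r   =>     l    node
 *	  / \               /  \	  / \             / \   /  \
 *	 ll  lr            lr   r	 ll  lr          ll lrl lrr  r
 *					    /  \
 *					  lrl  lrr
 */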

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	struct inet_peer __rcu ***stackptr, ***delp;

	if (lookup(&p->daddr, stack, base) != p)
		BUG();
	delp = stackptr - 1; /* *delp[0] == p */
	if (p->avl_left == peer_avl_empty_rcu) {
		*delp[0] = p->avl_right;
		--stackptr;
	} else {
		/* look for a node to insert instead of p */
		struct inet_peer *t;
		t = lookup_rightempty(p, base);
		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
		**--stackptr = t->avl_left;
		/* t is removed, t->daddr > x->daddr for any
		 * x in p->avl_left subtree.
		 * Put t in the old place of p. */
		RCU_INIT_POINTER(*delp[0], t);
		t->avl_left = p->avl_left;
		t->avl_right = p->avl_right;
		t->avl_height = p->avl_height;
		BUG_ON(delp[1] != &p->avl_left);
		delp[1] = &t->avl_left; /* was &p->avl_left */
	}
	peer_avl_rebalance(stack, stackptr, base);
	base->total--;
	call_rcu(&p->rcu, inetpeer_free_rcu);
}

/* perform garbage collect on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
			struct inet_peer __rcu ***stackptr)
{
	struct inet_peer *p, *gchead = NULL;
	__u32 delta, ttl;
	int cnt = 0;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
	stackptr--; /* last stack slot is peer_avl_empty */
	while (stackptr > stack) {
		stackptr--;
		p = rcu_deref_locked(**stackptr, base);
		if (atomic_read(&p->refcnt) == 0) {
			smp_rmb(); /* pairs with inet_putpeer(): dtime is valid */
			delta = (__u32)jiffies - p->dtime;
			if (delta >= ttl &&
			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
				p->gc_next = gchead;
				gchead = p;
			}
		}
	}
	while ((p = gchead) != NULL) {
		gchead = p->gc_next;
		cnt++;
		unlink_from_pool(p, base, stack);
	}
	return cnt;
}
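
/* Worked example of the ttl computation with the default tunables
 * (minttl = 120 sec, maxttl = 600 sec, threshold = 65664): a base holding
 * total = 32832 entries (half the threshold) gets
 *
 *	ttl = 600*HZ - (480 * 32832 / 65664) * HZ = 360*HZ
 *
 * so the allowed idle time shrinks linearly as the pool fills, dropping to
 * 0 (collect everything unreferenced) once total >= threshold.
 */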

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer *p;
	unsigned int sequence;
	int invalidated, gccnt = 0;

	flush_check(base, daddr->family);

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	sequence = read_seqbegin(&base->lock);
	p = lookup_rcu(daddr, base);
	invalidated = read_seqretry(&base->lock, sequence);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* retry an exact lookup, taking the lock before.
	 * At least, nodes should be hot in our cache.
	 */
	write_seqlock_bh(&base->lock);
relookup:
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		write_sequnlock_bh(&base->lock);
		return p;
	}
	if (!gccnt) {
		gccnt = inet_peer_gc(base, stack, stackptr);
		if (gccnt && create)
			goto relookup;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count,
				(daddr->family == AF_INET) ?
					secure_ip_id(daddr->addr.a4) :
					secure_ipv6_id(daddr->addr.a6));
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		/* 60*HZ is arbitrary, but chosen enough high so that the first
		 * calculation of tokens is at its maximum.
		 */
		p->rate_last = jiffies - 60*HZ;
		INIT_LIST_HEAD(&p->gc_list);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
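
/* Illustrative caller (hypothetical; shows only the get/put pairing):
 *
 *	struct inetpeer_addr daddr = { .family = AF_INET };
 *	struct inet_peer *peer;
 *
 *	daddr.addr.a4 = ip_hdr(skb)->saddr;
 *	peer = inet_getpeer(base, &daddr, 1);	// create if not found
 *	if (peer) {
 *		// use peer->rate_tokens, peer->metrics[], ...
 *		inet_putpeer(peer);		// release the reference
 *	}
 *
 * With create == 0 the call never allocates and may return NULL.
 */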

void inet_putpeer(struct inet_peer *p)
{
	/* Order the dtime store before the refcnt decrement: the gc side
	 * pairs this with smp_rmb() after seeing refcnt == 0.
	 */
	p->dtime = (__u32)jiffies;
	smp_mb__before_atomic_dec();
	atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for given message.
 *	The rate information is held in the inet_peer entries now.
 *	This routine is a filter: it returns true when a message may be
 *	sent and false when it should be suppressed.
 *
 *	It implements a token bucket: the bucket gains one token per
 *	jiffy and holds at most XRLIM_BURST_FACTOR * timeout tokens;
 *	sending a message costs "timeout" tokens.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
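
/* Example use (modeled on the ICMP rate limiter, simplified): with
 * timeout = 1*HZ, a peer may send a burst of up to XRLIM_BURST_FACTOR (6)
 * messages, then one message per second as tokens accumulate:
 *
 *	if (inet_peer_xrlim_allow(peer, 1 * HZ))
 *		;// transmit the ICMP error
 */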

static void inetpeer_inval_rcu(struct rcu_head *head)
{
	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);

	spin_lock_bh(&gc_lock);
	list_add_tail(&p->gc_list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct inet_peer *root;

	write_seqlock_bh(&base->lock);

	root = rcu_deref_locked(base->root, base);
	if (root != peer_avl_empty) {
		base->root = peer_avl_empty_rcu;
		base->total = 0;
		/* Hand the whole detached tree to the gc worker after a
		 * grace period; it frees nodes as their references drop.
		 */
		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
	}

	write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);