/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#ifndef __GENKSYMS__
#include <net/lwtunnel.h>
#endif

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) Entries released via __dst_free() are queued on dst_garbage.list,
 *    protected by dst_garbage.lock, from both BH and non-BH context.
 * 2) dst_gc_task(), serialized by dst_gc_mutex, walks the queued entries:
 *    unreferenced ones are destroyed, still-referenced ones are parked on
 *    dst_busy_list and revisited on the next run.
 * 3) The delayed work re-arms itself with an interval that grows from
 *    DST_GC_MIN towards DST_GC_MAX while little progress is being made,
 *    and is pulled back to DST_GC_MIN when new entries are queued and the
 *    interval has grown.
 */

static struct {
	spinlock_t lock;
	struct dst_entry *list;
	unsigned long timer_inc;
	unsigned long timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

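/* Default handler for dead or invalid dsts: silently drop the packet. */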
int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_sk);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force linker to place this variable
	 * into const section. Otherwise it might end into bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	.refcnt = ATOMIC_INIT(1),
};

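/* Initialize every field of a freshly allocated dst_entry, taking a
 * reference on @dev and accounting the entry against @ops unless
 * DST_NOCOUNT is set.
 */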
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_sk;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

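/* Allocate a dst_entry from @ops->kmem_cachep.  If the protocol is over
 * its gc_thresh, its garbage collector is invoked first; allocation is
 * refused when it cannot make room.
 */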
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required, when
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

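/* Put an entry on the garbage list for deferred destruction, shortening
 * the gc interval if it has grown.
 */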
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

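/* Tear down a dst: drop its entry accounting, call the protocol's
 * ->destroy(), release the device and lwtunnel state, and free the
 * memory.  An unreferenced NOHASH child is destroyed here as well;
 * a still-referenced NOHASH child is returned for deferred freeing.
 */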
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

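/* RCU callback used by dst_release() for DST_NOCACHE entries; anything
 * dst_destroy() hands back goes onto the garbage list.
 */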
static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

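/* Drop one reference.  When the last reference to an uncached
 * (DST_NOCACHE) entry goes away, destruction is deferred to an RCU
 * callback; a negative count indicates a refcounting bug and triggers
 * a ratelimited warning.
 */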
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

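/* Copy-on-write for dst metrics: replace a read-only metrics block with
 * a private, refcounted copy via cmpxchg().  If the race is lost, the
 * winner's block is used instead (NULL when it is read-only); on
 * success the reference on a refcounted old block is dropped.
 */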
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		atomic_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (atomic_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
	.family = AF_UNSPEC,
};

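/* Metadata dsts only carry tunnel metadata; they must never see real
 * input or output, so the handlers below warn once and drop the skb.
 */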
static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

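/* Allocate a standalone metadata dst with @optslen bytes of tunnel
 * options appended.  The entry is uncached and unaccounted, and its
 * input/output handlers are wired to the warning stubs above.
 */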
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;
	struct dst_entry *dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

	dst->input = dst_md_discard;
	dst->output = dst_md_discard_sk;

	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->opts_len = optslen;
	md_dst->type = type;

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

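/* Free a metadata dst, destroying its dst_cache first when the kernel
 * is built with CONFIG_DST_CACHE and the entry is an IP tunnel one.
 */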
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}

/*
 * Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but unfortunately
 * compatibility with broken binaries is a requirement.
 *
 *		    --ANK (980802)
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

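/* Netdevice notifier: on NETDEV_DOWN and NETDEV_UNREGISTER_FINAL, walk
 * the busy and garbage lists and detach every dst still pointing at the
 * device, re-targeting it at the loopback device on unregister.
 */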
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		/* The code in dst_ifdown places a hold on the loopback device.
		 * If the gc entry processing is set to expire after a lengthy
		 * interval, this hold can cause netdev_wait_allrefs() to hang
		 * out and wait for a long time -- until the loopback
		 * interface is released.  If we're really unlucky, it'll emit
		 * pr_emerg messages to console too.  Reset the interval here,
		 * so dst cleanups occur in a more timely fashion.
		 */
		if (dst_garbage.timer_inc > DST_GC_INC) {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
			mod_delayed_work(system_wq, &dst_gc_work,
					 dst_garbage.timer_expires);
		}
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10,
};

void __init dst_subsys_init(void)
{
	register_netdevice_notifier_rh(&dst_dev_notifier);
}