/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>

/*
 * Theory of operations:
 *
 * Entries freed with __dst_free() are chained onto dst_garbage.list
 * under dst_garbage.lock; both BH and process context may add to it.
 * The delayed work dst_gc_task() periodically takes over the list,
 * destroys entries whose refcount has dropped to zero and parks the
 * still-referenced ones on dst_busy_list for a later pass.  The
 * rescheduling interval backs off between DST_GC_MIN and DST_GC_MAX
 * depending on how productive each pass was, and __dst_free() snaps
 * it back to DST_GC_MIN whenever new garbage arrives.
 */

static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);

/* Entries parked here were still referenced on the last GC pass and are
 * revisited by dst_gc_task(); protected by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* dst_destroy() handed back a still-referenced
			 * DST_NOHASH child.  If it is already marked
			 * dead it sits on a garbage list and will be
			 * picked up there; otherwise mark it dead and
			 * requeue it on this pass.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed / 10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4 * HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
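
/* A rough sketch of the resulting back-off, assuming the usual constants
 * from <net/dst.h> (DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2,
 * DST_GC_MAX = 120*HZ): a productive pass reschedules in 0.1s, and each
 * unproductive pass then adds a growing increment, so the delay walks
 * through roughly 0.6s, 1.6s, 3.1s, 5.1s, ... until it hits the 120s cap.
 */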

int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);
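
/* Example (sketch): dst_discard is the safe default packet handler.
 * dst_alloc() below installs it on every new entry, and a protocol can
 * use it directly to blackhole traffic on a live entry:
 *
 *	dst->input = dst_discard;
 *	dst->output = dst_discard;
 */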

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force linker to place this variable
	 * into const section. Otherwise it might end into bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);
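
/* Example (hypothetical sketch; the type, ops and field names below are
 * placeholders, not part of this file): a protocol embeds struct dst_entry
 * as the first member of its route type, sizes its dst_ops.kmem_cachep
 * accordingly, and allocates through dst_alloc():
 *
 *	struct my_route {
 *		struct dst_entry dst;
 *		u32 my_cookie;
 *	};
 *
 *	struct my_route *rt;
 *
 *	rt = dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_NONE, 0);
 *	if (!rt)
 *		return NULL;
 *	rt->my_cookie = 0;
 */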

static void ___dst_free(struct dst_entry *dst)
{
	/* If the device is gone or no longer up, any packet routed over
	 * this entry must be dropped rather than transmitted.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
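
/* For context (a sketch of the <net/dst.h> wrapper as it stood with this
 * code): dst_free() only queues onto the garbage list when the entry is
 * still referenced, otherwise it destroys it on the spot:
 *
 *	if (!atomic_read(&dst->__refcnt)) {
 *		dst = dst_destroy(dst);
 *		if (!dst)
 *			return;
 *	}
 *	__dst_free(dst);
 */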

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in his hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
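
/* Example (sketch): a caller holding the last reference frees an entry
 * with dst_destroy(); any still-referenced DST_NOHASH child handed back
 * must be queued for delayed freeing, exactly as dst_release() does below:
 *
 *	dst = dst_destroy(dst);
 *	if (dst)
 *		__dst_free(dst);
 */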

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
			dst = dst_destroy(dst);
			if (dst)
				__dst_free(dst);
		}
	}
}
EXPORT_SYMBOL(dst_release);
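
/* Example (sketch; my_lookup_route is a placeholder): the usual reference
 * discipline around a lookup that returns a held entry:
 *
 *	struct dst_entry *dst = my_lookup_route(net, daddr);
 *
 *	if (dst) {
 *		... use dst ...
 *		dst_release(dst);
 *	}
 *
 * For DST_NOCACHE entries the final dst_release() frees immediately.
 */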

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			/* Another cpu COWed the metrics first; reuse its
			 * copy unless it is the read-only default set.
			 */
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
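
/* Example (sketch; my_dst_ops is a placeholder): protocols that want
 * writable per-route metrics point their dst_ops at this generic
 * copy-on-write helper; dst_metrics_write_ptr() then lands here on the
 * first write to a read-only metrics set:
 *
 *	static struct dst_ops my_dst_ops = {
 *		.family		= AF_INET,
 *		.cow_metrics	= dst_cow_metrics_generic,
 *	};
 */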

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
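
/* For context (a sketch of the <net/dst.h> wrapper): destroy paths call
 * dst_destroy_metrics_generic(), which guards this function with a
 * read-only check so only metrics COWed above are ever kfree()d:
 *
 *	unsigned long val = dst->_metrics;
 *	if (!(val & DST_METRICS_READ_ONLY))
 *		__dst_destroy_metrics_generic(dst, val);
 */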

/**
 * __skb_dst_set_noref - sets skb dst, without a reference
 * @skb: buffer
 * @dst: dst entry
 * @force: if force is set, use noref version even for DST_NOCACHE entries
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

	/* If dst not in cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero.
	 */
	if (unlikely((dst->flags & DST_NOCACHE) && !force)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(__skb_dst_set_noref);
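
/* Example (sketch; some_cached_route is a placeholder): callers normally
 * use the skb_dst_set_noref() wrapper from <net/dst.h>, which passes
 * force == false, inside an RCU section, since a noref dst must outlive
 * the skb only for the duration of that section:
 *
 *	rcu_read_lock();
 *	dst = rcu_dereference(some_cached_route);
 *	if (dst)
 *		skb_dst_set_noref(skb, dst);
 *	...
 *	rcu_read_unlock();
 */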

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}