/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the primary means of communication with the user
 *		level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

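/* fib_info objects are looked up from three hash tables, all guarded
 * by fib_info_lock:
 *  - fib_info_hash:      every fib_info, keyed by fib_info_hashfn()
 *  - fib_info_laddrhash: fib_infos with a preferred source address,
 *                        keyed by fib_laddr_hashfn()
 *  - fib_info_devhash:   nexthops, keyed by device index
 */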
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope, that gcc will optimize it to get rid of dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }


const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};

static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */
	dst_free(&rt->dst);
}

static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}

static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt)
			dst_free(&rt->dst);
	}
	free_percpu(rtp);
}

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		if (nexthop_nh->nh_exceptions)
			free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	release_net(fi->fib_net);
	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

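/* Compare the nexthop lists of two fib_infos: oif, gateway, scope,
 * weight and tclassid (where configured) must all match, ignoring the
 * RTNH_F_DEAD flag. Returns 0 on a full match, -1 otherwise.
 */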
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

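/* Find an existing fib_info that duplicates @nfi, so that routes with
 * identical semantics can share a single refcounted object.
 */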
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check, that the gateway is already configured.
 * Used only by redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}

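/* Guess whether a default gateway is dead from its ARP/neighbour
 * state. Returns 1 when it looks dead; a still-valid candidate may be
 * recorded in *last_resort / *last_idx as a fallback.
 */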
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

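/* Fill fi->fib_nh[] from an RTA_MULTIPATH payload: a stream of
 * rtnexthop headers, each optionally followed by nested RTA_GATEWAY
 * and RTA_FLOW attributes.
 */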
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif

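/* Check whether a route change request matches this fib_info's
 * nexthop configuration. Returns 0 on a match, nonzero otherwise.
 */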
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}

/*
 * Picture
 * -------
 *
 * Semantics of nexthop is very messy by historical reasons.
 * We have to take into account, that:
 * a) gateway can be actually local interface address,
 *    so that gatewayed route is direct.
 * b) gateway must be on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) If both gateway and interface are specified, they should not
 *    contradict.
 * d) If we use tunnel routes, gateway could be not on-link.
 *
 * Attempt to reconcile all of these (alas, self contradictory)
 * requirements results in the checks spread over fib_check_nh()
 * below:
 *
 * 1. Gateway with RTNH_F_ONLINK: the gateway is trusted to be
 *    on-link on the requested interface, so only the scope, the
 *    gateway address type and the device state are verified.
 * 2. Gateway without RTNH_F_ONLINK: the gateway itself is resolved
 *    with fib_lookup() at a scope one step tighter than the route
 *    being added (and no looser than link scope); the nexthop device
 *    and scope are taken from that result.
 * 3. No gateway: the route is direct, the nexthop device comes from
 *    the interface index and the nexthop scope is host.
 */
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

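/* The hash tables are allocated with kzalloc() while they fit into a
 * single page, and from the page allocator beyond that.
 */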
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

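/* Rehash every fib_info into freshly allocated info and laddr tables,
 * under fib_info_lock so the move is serialized against other table
 * writers.
 */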
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

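/* Recompute the cached preferred source address of a nexthop and
 * stamp it with the current address generation for later revalidation.
 */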
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}

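/* Build a fib_info from a netlink route request: validate the type
 * and scope, parse metrics and nexthops, check each nexthop, and
 * return an already-hashed duplicate instead when one exists.
 */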
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	fib_info_cnt++;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				u32 val;

				if (type > RTAX_MAX)
					goto err_inval;
				val = nla_get_u32(nla);
				if (type == RTAX_ADVMSS && val > 65535 - 40)
					val = 65535 - 40;
				if (type == RTAX_MTU && val > 65535 - 15)
					val = 65535 - 15;
				fi->fib_metrics[type - 1] = val;
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

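/* Serialize one route into a netlink message: the rtmsg header plus
 * attributes, with multi-nexthop routes packed into an RTA_MULTIPATH
 * nest.
 */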
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

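/* Mark all nexthops using @dev as dead, and with them any fib_info
 * whose nexthops are now all dead. A nonzero @force ignores the scope
 * check; @force > 1 kills every nexthop on @dev unconditionally.
 */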
int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/* Must be invoked inside of an RCU protected region.  */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * Dead device goes up. We wake up dead nexthops.
 * It takes sense only on multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}


	/* w should be random number [0..fi->fib_power-1],
	 * it is pretty bad approximation.
	 */

	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */