// SPDX-License-Identifier: GPL-2.0-only
/* vrf.c: device driver to encapsulate a VRF (L3 master) space */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.1"

#define FIB_RULE_PREF	1000

#define HT_MAP_BITS	4
#define HASH_INITVAL	((u32)0xcafef00d)

struct vrf_map {
	DECLARE_HASHTABLE(ht, HT_MAP_BITS);
	spinlock_t vmap_lock;

	/* count of FIB tables that are bound to more than one VRF device.
	 * Strict mode can only be enabled while this count is zero, and
	 * while strict mode is on a table cannot be claimed by a second VRF.
	 */
	u32 shared_tables;

	bool strict_mode;
};

struct vrf_map_elem {
	struct hlist_node hnode;
	struct list_head vrf_list;	/* VRF devices bound to this table */

	u32 table_id;
	int users;
	int ifindex;
};

static unsigned int vrf_net_id;

/* per-netns vrf data */
struct netns_vrf {
	/* protected by rtnl lock */
	bool add_fib_rules;

	struct vrf_map vmap;
	struct ctl_table_header *ctl_hdr;
};

struct net_vrf {
	struct rtable __rcu *rth;
	struct rt6_info __rcu *rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table *fib6_table;
#endif
	u32 tb_id;

	struct list_head me_list;	/* entry in vrf_map_elem->vrf_list */
	int ifindex;
};

struct pcpu_dstats {
	u64 tx_pkts;
	u64 tx_bytes;
	u64 tx_drps;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drps;
	struct u64_stats_sync syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}

static struct vrf_map *netns_vrf_map(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	return &nn_vrf->vmap;
}

static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev)
{
	return netns_vrf_map(dev_net(dev));
}

static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me)
{
	struct list_head *me_head = &me->vrf_list;
	struct net_vrf *vrf;

	if (list_empty(me_head))
		return -ENODEV;

	vrf = list_first_entry(me_head, struct net_vrf, me_list);

	return vrf->ifindex;
}

static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags)
{
	struct vrf_map_elem *me;

	me = kmalloc(sizeof(*me), flags);
	if (!me)
		return NULL;

	return me;
}

static void vrf_map_elem_free(struct vrf_map_elem *me)
{
	kfree(me);
}

static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id,
			      int ifindex, int users)
{
	me->table_id = table_id;
	me->ifindex = ifindex;
	me->users = users;
	INIT_LIST_HEAD(&me->vrf_list);
}

static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,
						u32 table_id)
{
	struct vrf_map_elem *me;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_for_each_possible(vmap->ht, me, hnode, key) {
		if (me->table_id == table_id)
			return me;
	}

	return NULL;
}

static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)
{
	u32 table_id = me->table_id;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_add(vmap->ht, &me->hnode, key);
}

static void vrf_map_del_elem(struct vrf_map_elem *me)
{
	hash_del(&me->hnode);
}

static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
{
	spin_lock(&vmap->vmap_lock);
}

static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
{
	spin_unlock(&vmap->vmap_lock);
}

/* called with rtnl lock held */
static int
vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	struct vrf_map_elem *new_me, *me;
	u32 table_id = vrf->tb_id;
	bool free_new_me = false;
	int users;
	int res;

	/* pre-allocate the element outside the spin-locked section so the
	 * lock is held as briefly as possible
	 */
	new_me = vrf_map_elem_alloc(GFP_KERNEL);
	if (!new_me)
		return -ENOMEM;

	vrf_map_elem_init(new_me, table_id, dev->ifindex, 0);

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		me = new_me;
		vrf_map_add_elem(vmap, me);
		goto link_vrf;
	}

	/* another VRF already uses this table: reuse the existing element
	 * and free the pre-allocated one below
	 */
	free_new_me = true;
	if (vmap->strict_mode) {
		/* in strict mode a table can be bound to only one VRF */
		NL_SET_ERR_MSG(extack, "Table is used by another VRF");
		res = -EBUSY;
		goto unlock;
	}

link_vrf:
	users = ++me->users;
	if (users == 2)
		++vmap->shared_tables;

	list_add(&vrf->me_list, &me->vrf_list);

	res = 0;

unlock:
	vrf_map_unlock(vmap);

	/* drop the pre-allocated element if it was not used */
	if (free_new_me)
		vrf_map_elem_free(new_me);

	return res;
}

/* called with rtnl lock held */
static void vrf_map_unregister_dev(struct net_device *dev)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	u32 table_id = vrf->tb_id;
	struct vrf_map_elem *me;
	int users;

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me)
		goto unlock;

	list_del(&vrf->me_list);

	users = --me->users;
	if (users == 1) {
		--vmap->shared_tables;
	} else if (users == 0) {
		vrf_map_del_elem(me);

		/* no one will refer to this element anymore */
		vrf_map_elem_free(me);
	}

unlock:
	vrf_map_unlock(vmap);
}

/* return the vrf device index associated with the table_id */
static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
{
	struct vrf_map *vmap = netns_vrf_map(net);
	struct vrf_map_elem *me;
	int ifindex;

	vrf_map_lock(vmap);

	if (!vmap->strict_mode) {
		ifindex = -EPERM;
		goto unlock;
	}

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		ifindex = -ENODEV;
		goto unlock;
	}

	ifindex = vrf_map_elem_get_vrf_ifindex(me);

unlock:
	vrf_map_unlock(vmap);

	return ifindex;
}

/* true when the device still uses the default (noqueue) qdisc on its
 * single Tx queue, i.e. no enqueue function is attached
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to a local address: instead of transmitting the
 * packet, re-inject it into the stack through the VRF device's Rx path.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));

	fl6.flowi6_oif = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;
	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;

	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst) || dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is the VRF device again this is locally originated
	 * traffic destined to a local address. Short circuit to Rx path.
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* run the NF_INET_LOCAL_OUT hook and then hand the packet to dst_output() */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));

	fl4.flowi4_oif = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is the VRF device again this is locally originated
	 * traffic destined to a local address. Short circuit to Rx path.
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	const struct in6_addr *nexthop;
	struct neighbour *neigh;
	int ret;

	nf_reset_ct(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* strip conntrack state before handing the skb back; if the hooks
	 * consumed the skb, return NULL
	 */
	if (likely(err == 1))
		nf_reset_ct(skb);
	else
		skb = NULL;

	return skb;
}

static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets to vrf device */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev) ||
	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 may be disabled at runtime; nothing to set up in that case */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create the dst used to divert IPv6 output through vrf_output6() */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output = vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;
	int ret = -EINVAL;

	nf_reset_ct(skb);

	/* make sure there is enough headroom for the link layer header */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		ret = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* strip conntrack state before handing the skb back; if the hooks
	 * consumed the skb, return NULL
	 */
	if (likely(err == 1))
		nf_reset_ct(skb);
	else
		skb = NULL;

	return skb;
}

static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast traffic */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev) ||
	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* l3mdev_l3_out hook: divert locally originated traffic to the VRF device */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create the dst used to divert IPv4 output through vrf_output() */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
	if (!rth)
		return -ENOMEM;

	rth->dst.output = vrf_output;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/* cycle a slave device down/up so its addresses and routes are re-installed
 * against the (possibly changed) table of its master
 */
static void cycle_netdev(struct net_device *dev,
			 struct netlink_ext_ack *extack)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags, extack);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev, extack);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev, NULL);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the per-VRF dst entries used to divert IPv4/IPv6 output */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* default the MTU to 64 kB, as the loopback device does */
	dev->mtu = 64 * 1024;

	/* a VRF device has no real link state; report it as up */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init = vrf_dev_init,
	.ndo_uninit = vrf_dev_uninit,
	.ndo_start_xmit = vrf_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = vrf_get_stats64,
	.ndo_add_slave = vrf_add_slave,
	.ndo_del_slave = vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* NDISC frames (router/neighbour solicitations, advertisements and
 * redirects) must be processed against the ingress device rather than
 * the VRF device, so detect them here.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif = ifindex,
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	/* loopback, multicast & non-ndisc link-local traffic; do not push
	 * through packet taps again. Reset pkt_type for upper layers
	 * on local delivery.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		if (skb->pkt_type == PACKET_LOOPBACK)
			skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC then keep the ingress interface */
	if (!is_ndisc) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers on local delivery.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* l3mdev_l3_rcv hook: called in the receive path with rcu read lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via an interface enslaved to the
 * VRF device: force the lookup into the VRF's table. The lookup uses
 * RT6_LOOKUP_F_DST_NOREF, so no reference is taken on the returned dst.
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table = vrf_fib_table,
	.l3mdev_l3_rcv = vrf_l3_rcv,
	.l3mdev_l3_out = vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo = vrf_get_drvinfo,
};

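/* When the first VRF device is created in a network namespace,
 * vrf_add_fib_rules() below installs one l3mdev FIB rule (FRA_L3MDEV,
 * preference FIB_RULE_PREF) per supported address family so that lookups
 * for traffic bound to a VRF are redirected to the VRF's table.
 */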
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));	/* FRA_PROTOCOL */

	return sz;
}

static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
	    !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
	if (err < 0)
		goto ip6mr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
ip6mr_err:
	vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false);
#endif

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* assign a random ethernet address */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* VRF devices do not care about MTU, but if the MTU is set
	 * too low then the ipv4 and ipv6 protocols are disabled
	 * which breaks networking.
	 */
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;
}

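/* For reference, a VRF instance is typically created and used from user
 * space with iproute2, e.g.:
 *
 *	ip link add vrf-blue type vrf table 10
 *	ip link set dev vrf-blue up
 *	ip link set dev eth0 master vrf-blue
 *
 * which ends up in vrf_newlink() (with IFLA_VRF_TABLE = 10) and
 * vrf_add_slave() below; the device name and table id are only examples.
 */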
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	vrf_map_unregister_dev(dev);

	unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct netns_vrf *nn_vrf;
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* mapping between table_id and vrf;
	 * note: the ifindex is only known once register_netdevice() has
	 * completed, so the table<->VRF mapping is registered here rather
	 * than in ndo_init.
	 */
	vrf->ifindex = dev->ifindex;

	err = vrf_map_register_dev(dev, extack);
	if (err) {
		unregister_netdevice(dev);
		goto out;
	}

	net = dev_net(dev);
	nn_vrf = net_generic(net, vrf_net_id);

	add_fib_rules = &nn_vrf->add_fib_rules;
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			vrf_map_unregister_dev(dev);
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind = DRV_NAME,
	.priv_size = sizeof(struct net_vrf),

	.get_size = vrf_nl_getsize,
	.policy = vrf_nl_policy,
	.validate = vrf_validate,
	.fill_info = vrf_fillinfo,

	.get_slave_size = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink = vrf_newlink,
	.dellink = vrf_dellink,
	.setup = vrf_setup,
	.maxtype = IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int vrf_map_init(struct vrf_map *vmap)
{
	spin_lock_init(&vmap->vmap_lock);
	hash_init(vmap->ht);

	vmap->strict_mode = false;

	return 0;
}

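/* "strict mode" is exposed per network namespace through the sysctl
 * registered below (/proc/sys/net/vrf/strict_mode, e.g.
 * "sysctl -w net.vrf.strict_mode=1"). While it is enabled each FIB table
 * can be bound to at most one VRF device and table-id to VRF ifindex
 * lookups are allowed; it can only be turned on while no table is shared
 * between VRFs.
 */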
#ifdef CONFIG_SYSCTL
static bool vrf_strict_mode(struct vrf_map *vmap)
{
	bool strict_mode;

	vrf_map_lock(vmap);
	strict_mode = vmap->strict_mode;
	vrf_map_unlock(vmap);

	return strict_mode;
}

static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode)
{
	bool *cur_mode;
	int res = 0;

	vrf_map_lock(vmap);

	cur_mode = &vmap->strict_mode;
	if (*cur_mode == new_mode)
		goto unlock;

	if (*cur_mode) {
		/* disable strict mode */
		*cur_mode = false;
	} else {
		if (vmap->shared_tables) {
			/* we cannot allow strict_mode because there are some
			 * vrfs that share fib tables
			 */
			res = -EBUSY;
			goto unlock;
		}

		/* no tables are shared among vrfs, so strict mode can be
		 * safely enabled
		 */
		*cur_mode = true;
	}

unlock:
	vrf_map_unlock(vmap);

	return res;
}

static int vrf_shared_table_handler(struct ctl_table *table, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)table->extra1;
	struct vrf_map *vmap = netns_vrf_map(net);
	int proc_strict_mode = 0;
	struct ctl_table tmp = {
		.procname = table->procname,
		.data = &proc_strict_mode,
		.maxlen = sizeof(int),
		.mode = table->mode,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	};
	int ret;

	if (!write)
		proc_strict_mode = vrf_strict_mode(vmap);

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode);

	return ret;
}

static const struct ctl_table vrf_table[] = {
	{
		.procname = "strict_mode",
		.data = NULL,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = vrf_shared_table_handler,
		/* set to the struct net in vrf_netns_init_sysctl() */
		.extra1 = NULL,
	},
	{ },
};

static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	struct ctl_table *table;

	table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* the strict_mode handler retrieves the netns from extra1 */
	table[0].extra1 = net;

	nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table);
	if (!nn_vrf->ctl_hdr) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
	struct ctl_table *table;

	table = nn_vrf->ctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nn_vrf->ctl_hdr);
	kfree(table);
}
#else
static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
}
#endif

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	nn_vrf->add_fib_rules = true;
	vrf_map_init(&nn_vrf->vmap);

	return vrf_netns_init_sysctl(net, nn_vrf);
}

static void __net_exit vrf_netns_exit(struct net *net)
{
	vrf_netns_exit_sysctl(net);
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.exit = vrf_netns_exit,
	.id = &vrf_net_id,
	.size = sizeof(struct netns_vrf),
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
					  vrf_ifindex_lookup_by_table_id);
	if (rc < 0)
		goto unreg_pernet;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto table_lookup_unreg;

	return 0;

table_lookup_unreg:
	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
				       vrf_ifindex_lookup_by_table_id);

unreg_pernet:
	unregister_pernet_subsys(&vrf_net_ops);

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);