linux/drivers/net/vrf.c
/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

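/* A VRF device is created with an associated routing table, and lower
 * devices are enslaved to it to move their addresses and routes into
 * that table. Illustrative iproute2 commands (not part of this file;
 * see Documentation/networking/vrf.txt):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 */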
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>

#define DRV_NAME        "vrf"
#define DRV_VERSION     "1.0"

#define FIB_RULE_PREF  1000       /* default preference for FIB rules */
static bool add_fib_rules = true;

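/* per-VRF state: cached output dsts (rth/rt6) used to divert locally
 * generated traffic through the VRF device, cached "local" dsts
 * (rth_local/rt6_local) for loopback-style delivery to local
 * addresses, and the id of the routing table this VRF is bound to
 */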
struct net_vrf {
        struct rtable __rcu     *rth;
        struct rtable __rcu     *rth_local;
        struct rt6_info __rcu   *rt6;
        struct rt6_info __rcu   *rt6_local;
        u32                     tb_id;
};

struct pcpu_dstats {
        u64                     tx_pkts;
        u64                     tx_bytes;
        u64                     tx_drps;
        u64                     rx_pkts;
        u64                     rx_bytes;
        u64                     rx_drps;
        struct u64_stats_sync   syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
        struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

        u64_stats_update_begin(&dstats->syncp);
        dstats->rx_pkts++;
        dstats->rx_bytes += len;
        u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
        vrf_dev->stats.tx_errors++;
        kfree_skb(skb);
}

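/* sum the per-cpu counters into the netlink stats structure; the
 * u64_stats seqcount retry loop keeps the 64-bit reads consistent
 * on 32-bit hosts
 */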
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_dstats *dstats;
                u64 tbytes, tpkts, tdrops, rbytes, rpkts;
                unsigned int start;

                dstats = per_cpu_ptr(dev->dstats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&dstats->syncp);
                        tbytes = dstats->tx_bytes;
                        tpkts = dstats->tx_pkts;
                        tdrops = dstats->tx_drps;
                        rbytes = dstats->rx_bytes;
                        rpkts = dstats->rx_pkts;
                } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
                stats->tx_bytes += tbytes;
                stats->tx_packets += tpkts;
                stats->tx_dropped += tdrops;
                stats->rx_bytes += rbytes;
                stats->rx_packets += rpkts;
        }
        return stats;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
                          struct dst_entry *dst)
{
        int len = skb->len;

        skb_orphan(skb);

        skb_dst_set(skb, dst);
        skb_dst_force(skb);

        /* set pkt_type to avoid skb hitting packet taps twice -
         * once on Tx and again in Rx processing
         */
        skb->pkt_type = PACKET_LOOPBACK;

        skb->protocol = eth_type_trans(skb, dev);

        if (likely(netif_rx(skb) == NET_RX_SUCCESS))
                vrf_rx_stats(dev, len);
        else
                this_cpu_inc(dev->dstats->rx_drps);

        return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
                             struct sk_buff *skb)
{
        int err;

        err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
                      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

        if (likely(err == 1))
                err = dst_output(net, sk, skb);

        return err;
}

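/* Tx path for IPv6: redo the route lookup with the VRF device as oif
 * so the oif FIB rule steers it to the VRF table. Traffic that
 * resolves to a local address is short-circuited to the Rx path via
 * the cached local dst.
 */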
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        struct flowi6 fl6 = {
                /* needed to match OIF rule */
                .flowi6_oif = dev->ifindex,
                .flowi6_iif = LOOPBACK_IFINDEX,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
                .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
        };
        int ret = NET_XMIT_DROP;
        struct dst_entry *dst;
        struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

        dst = ip6_route_output(net, NULL, &fl6);
        if (dst == dst_null)
                goto err;

        skb_dst_drop(skb);

        /* if dst.dev is loopback or the VRF device again this is locally
         * originated traffic destined to a local address. Short circuit
         * to Rx path using our local dst
         */
        if (dst->dev == net->loopback_dev || dst->dev == dev) {
                struct net_vrf *vrf = netdev_priv(dev);
                struct rt6_info *rt6_local;

                /* release looked up dst and use cached local dst */
                dst_release(dst);

                rcu_read_lock();

                rt6_local = rcu_dereference(vrf->rt6_local);
                if (unlikely(!rt6_local)) {
                        rcu_read_unlock();
                        goto err;
                }

                /* Ordering issue: cached local dst is created on newlink
                 * before the IPv6 initialization. Using the local dst
                 * requires rt6i_idev to be set so make sure it is.
                 */
                if (unlikely(!rt6_local->rt6i_idev)) {
                        rt6_local->rt6i_idev = in6_dev_get(dev);
                        if (!rt6_local->rt6i_idev) {
                                rcu_read_unlock();
                                goto err;
                        }
                }

                dst = &rt6_local->dst;
                dst_hold(dst);

                rcu_read_unlock();

                return vrf_local_xmit(skb, dev, dst);
        }

        skb_dst_set(skb, dst);

        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));

        ret = vrf_ip6_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                dev->stats.tx_errors++;
        else
                ret = NET_XMIT_SUCCESS;

        return ret;
err:
        vrf_tx_error(dev, skb);
        return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
{
        vrf_tx_error(dev, skb);
        return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
                            struct sk_buff *skb)
{
        int err;

        err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, skb_dst(skb)->dev, dst_output);
        if (likely(err == 1))
                err = dst_output(net, sk, skb);

        return err;
}

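/* Tx path for IPv4: same idea as the IPv6 path - route the packet in
 * the VRF table via the oif rule, divert local destinations to the
 * cached local dst, and fill in a source address if one was not set
 */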
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                           struct net_device *vrf_dev)
{
        struct iphdr *ip4h = ip_hdr(skb);
        int ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
                /* needed to match OIF rule */
                .flowi4_oif = vrf_dev->ifindex,
                .flowi4_iif = LOOPBACK_IFINDEX,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
                .daddr = ip4h->daddr,
        };
        struct net *net = dev_net(vrf_dev);
        struct rtable *rt;

        rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto err;

        if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
                ip_rt_put(rt);
                goto err;
        }

        skb_dst_drop(skb);

        /* if dst.dev is loopback or the VRF device again this is locally
         * originated traffic destined to a local address. Short circuit
         * to Rx path using our local dst
         */
        if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
                struct net_vrf *vrf = netdev_priv(vrf_dev);
                struct rtable *rth_local;
                struct dst_entry *dst = NULL;

                ip_rt_put(rt);

                rcu_read_lock();

                rth_local = rcu_dereference(vrf->rth_local);
                if (likely(rth_local)) {
                        dst = &rth_local->dst;
                        dst_hold(dst);
                }

                rcu_read_unlock();

                if (unlikely(!dst))
                        goto err;

                return vrf_local_xmit(skb, vrf_dev, dst);
        }

        skb_dst_set(skb, &rt->dst);

        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));

        if (!ip4h->saddr) {
                ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
                                               RT_SCOPE_LINK);
        }

        ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                vrf_dev->stats.tx_errors++;
        else
                ret = NET_XMIT_SUCCESS;

out:
        return ret;
err:
        vrf_tx_error(vrf_dev, skb);
        goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return vrf_process_v4_outbound(skb, dev);
        case htons(ETH_P_IPV6):
                return vrf_process_v6_outbound(skb, dev);
        default:
                vrf_tx_error(dev, skb);
                return NET_XMIT_DROP;
        }
}

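/* ndo_start_xmit handler: route the frame by protocol and account the
 * result in this CPU's counters
 */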
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
        netdev_tx_t ret = is_ip_tx_frame(skb, dev);

        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

                u64_stats_update_begin(&dstats->syncp);
                dstats->tx_pkts++;
                dstats->tx_bytes += skb->len;
                u64_stats_update_end(&dstats->syncp);
        } else {
                this_cpu_inc(dev->dstats->tx_drps);
        }

        return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
                              struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
        struct neighbour *neigh;
        struct in6_addr *nexthop;
        int ret;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        rcu_read_lock_bh();
        nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
                ret = dst_neigh_output(dst, neigh, skb);
                rcu_read_unlock_bh();
                return ret;
        }
        rcu_read_unlock_bh();

        IP6_INC_STATS(dev_net(dst->dev),
                      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
                            net, sk, skb, NULL, skb_dst(skb)->dev,
                            vrf_finish_output6,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
                                   struct sock *sk,
                                   struct sk_buff *skb)
{
        struct net_vrf *vrf = netdev_priv(vrf_dev);
        struct dst_entry *dst = NULL;
        struct rt6_info *rt6;

        /* don't divert link scope packets */
        if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
                return skb;

        rcu_read_lock();

        rt6 = rcu_dereference(vrf->rt6);
        if (likely(rt6)) {
                dst = &rt6->dst;
                dst_hold(dst);
        }

        rcu_read_unlock();

        if (unlikely(!dst)) {
                vrf_tx_error(vrf_dev, skb);
                return NULL;
        }

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        return skb;
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
        struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
        struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
        struct net *net = dev_net(dev);
        struct dst_entry *dst;

        RCU_INIT_POINTER(vrf->rt6, NULL);
        RCU_INIT_POINTER(vrf->rt6_local, NULL);
        synchronize_rcu();

        /* move dev in dst's to loopback so this VRF device can be deleted
         * - based on dst_ifdown
         */
        if (rt6) {
                dst = &rt6->dst;
                dev_put(dst->dev);
                dst->dev = net->loopback_dev;
                dev_hold(dst->dev);
                dst_release(dst);
        }

        if (rt6_local) {
                if (rt6_local->rt6i_idev)
                        in6_dev_put(rt6_local->rt6i_idev);

                dst = &rt6_local->dst;
                dev_put(dst->dev);
                dst->dev = net->loopback_dev;
                dev_hold(dst->dev);
                dst_release(dst);
        }
}

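/* allocate the IPv6 table and the two cached dsts (output and local)
 * for this VRF; called from ndo_init under rtnl
 */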
static int vrf_rt6_create(struct net_device *dev)
{
        int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
        struct net_vrf *vrf = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct fib6_table *rt6i_table;
        struct rt6_info *rt6, *rt6_local;
        int rc = -ENOMEM;

        /* IPv6 can be CONFIG enabled and then disabled runtime */
        if (!ipv6_mod_enabled())
                return 0;

        rt6i_table = fib6_new_table(net, vrf->tb_id);
        if (!rt6i_table)
                goto out;

        /* create a dst for routing packets out a VRF device */
        rt6 = ip6_dst_alloc(net, dev, flags);
        if (!rt6)
                goto out;

        dst_hold(&rt6->dst);

        rt6->rt6i_table = rt6i_table;
        rt6->dst.output = vrf_output6;

        /* create a dst for local routing - packets sent locally
         * to local address via the VRF device as a loopback
         */
        rt6_local = ip6_dst_alloc(net, dev, flags);
        if (!rt6_local) {
                dst_release(&rt6->dst);
                goto out;
        }

        dst_hold(&rt6_local->dst);

        rt6_local->rt6i_idev  = in6_dev_get(dev);
        rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
        rt6_local->rt6i_table = rt6i_table;
        rt6_local->dst.input  = ip6_input;

        rcu_assign_pointer(vrf->rt6, rt6);
        rcu_assign_pointer(vrf->rt6_local, rt6_local);

        rc = 0;
out:
        return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
                                   struct sock *sk,
                                   struct sk_buff *skb)
{
        return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
        return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
        u32 nexthop;
        int ret = -EINVAL;

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (!skb2) {
                        ret = -ENOMEM;
                        goto err;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                consume_skb(skb);
                skb = skb2;
        }

        rcu_read_lock_bh();

        nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
        neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
        if (!IS_ERR(neigh))
                ret = dst_neigh_output(dst, neigh, skb);

        rcu_read_unlock_bh();
err:
        if (unlikely(ret < 0))
                vrf_tx_error(skb->dev, skb);
        return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                            net, sk, skb, NULL, dev,
                            vrf_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
                                  struct sock *sk,
                                  struct sk_buff *skb)
{
        struct net_vrf *vrf = netdev_priv(vrf_dev);
        struct dst_entry *dst = NULL;
        struct rtable *rth;

        rcu_read_lock();

        rth = rcu_dereference(vrf->rth);
        if (likely(rth)) {
                dst = &rth->dst;
                dst_hold(dst);
        }

        rcu_read_unlock();

        if (unlikely(!dst)) {
                vrf_tx_error(vrf_dev, skb);
                return NULL;
        }

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
                                  struct sock *sk,
                                  struct sk_buff *skb,
                                  u16 proto)
{
        switch (proto) {
        case AF_INET:
                return vrf_ip_out(vrf_dev, sk, skb);
        case AF_INET6:
                return vrf_ip6_out(vrf_dev, sk, skb);
        }

        return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
        struct rtable *rth = rtnl_dereference(vrf->rth);
        struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
        struct net *net = dev_net(dev);
        struct dst_entry *dst;

        RCU_INIT_POINTER(vrf->rth, NULL);
        RCU_INIT_POINTER(vrf->rth_local, NULL);
        synchronize_rcu();

        /* move dev in dst's to loopback so this VRF device can be deleted
         * - based on dst_ifdown
         */
        if (rth) {
                dst = &rth->dst;
                dev_put(dst->dev);
                dst->dev = net->loopback_dev;
                dev_hold(dst->dev);
                dst_release(dst);
        }

        if (rth_local) {
                dst = &rth_local->dst;
                dev_put(dst->dev);
                dst->dev = net->loopback_dev;
                dev_hold(dst->dev);
                dst_release(dst);
        }
}

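/* IPv4 counterpart of vrf_rt6_create: ensure the FIB table exists and
 * allocate the cached output and local dsts
 */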
static int vrf_rtable_create(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct rtable *rth, *rth_local;

        if (!fib_new_table(dev_net(dev), vrf->tb_id))
                return -ENOMEM;

        /* create a dst for routing packets out through a VRF device */
        rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
        if (!rth)
                return -ENOMEM;

        /* create a dst for local ingress routing - packets sent locally
         * to local address via the VRF device as a loopback
         */
        rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
        if (!rth_local) {
                dst_release(&rth->dst);
                return -ENOMEM;
        }

        rth->dst.output = vrf_output;
        rth->rt_table_id = vrf->tb_id;

        rth_local->rt_table_id = vrf->tb_id;

        rcu_assign_pointer(vrf->rth, rth);
        rcu_assign_pointer(vrf->rth_local, rth_local);

        return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
        unsigned int flags = dev->flags;
        int ret;

        if (!netif_running(dev))
                return;

        ret = dev_change_flags(dev, flags & ~IFF_UP);
        if (ret >= 0)
                ret = dev_change_flags(dev, flags);

        if (ret < 0) {
                netdev_err(dev,
                           "Failed to cycle device %s; route tables might be wrong!\n",
                           dev->name);
        }
}

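/* enslave a device: link it under the VRF as master, mark it as an L3
 * slave and cycle it so its addresses and routes are re-installed in
 * the VRF table
 */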
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
        int ret;

        ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
        if (ret < 0)
                return ret;

        port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
        cycle_netdev(port_dev);

        return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
        if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
                return -EINVAL;

        return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
        netdev_upper_dev_unlink(port_dev, dev);
        port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

        cycle_netdev(port_dev);

        return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
        return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct net_device *port_dev;
        struct list_head *iter;

        vrf_rtable_release(dev, vrf);
        vrf_rt6_release(dev, vrf);

        netdev_for_each_lower_dev(dev, port_dev, iter)
                vrf_del_slave(dev, port_dev);

        free_percpu(dev->dstats);
        dev->dstats = NULL;
}

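/* ndo_init: allocate per-cpu stats and the cached IPv4/IPv6 dsts that
 * point back to this device
 */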
static int vrf_dev_init(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
        if (!dev->dstats)
                goto out_nomem;

        /* create the default dst which points back to us */
        if (vrf_rtable_create(dev) != 0)
                goto out_stats;

        if (vrf_rt6_create(dev) != 0)
                goto out_rth;

        dev->flags = IFF_MASTER | IFF_NOARP;

        /* MTU is irrelevant for VRF device; set to 64k similar to lo */
        dev->mtu = 64 * 1024;

        /* similarly, oper state is irrelevant; set to up to avoid confusion */
        dev->operstate = IF_OPER_UP;
        netdev_lockdep_set_classes(dev);
        return 0;

out_rth:
        vrf_rtable_release(dev, vrf);
out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
out_nomem:
        return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
        .ndo_init               = vrf_dev_init,
        .ndo_uninit             = vrf_dev_uninit,
        .ndo_start_xmit         = vrf_xmit,
        .ndo_get_stats64        = vrf_get_stats64,
        .ndo_add_slave          = vrf_add_slave,
        .ndo_del_slave          = vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
                                      struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct net *net = dev_net(dev);

        nf_reset(skb);

        if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
                skb = NULL;    /* kfree_skb(skb) handled by nf code */

        return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        bool rc = false;

        if (iph->nexthdr == NEXTHDR_ICMP) {
                const struct icmp6hdr *icmph;
                struct icmp6hdr _icmph;

                icmph = skb_header_pointer(skb, sizeof(*iph),
                                           sizeof(_icmph), &_icmph);
                if (!icmph)
                        goto out;

                switch (icmph->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
                case NDISC_ROUTER_ADVERTISEMENT:
                case NDISC_NEIGHBOUR_SOLICITATION:
                case NDISC_NEIGHBOUR_ADVERTISEMENT:
                case NDISC_REDIRECT:
                        rc = true;
                        break;
                }
        }

out:
        return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
                                             const struct net_device *dev,
                                             struct flowi6 *fl6,
                                             int ifindex,
                                             int flags)
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct fib6_table *table = NULL;
        struct rt6_info *rt6;

        rcu_read_lock();

        /* fib6_table does not have a refcnt and can not be freed */
        rt6 = rcu_dereference(vrf->rt6);
        if (likely(rt6))
                table = rt6->rt6i_table;

        rcu_read_unlock();

        if (!table)
                return NULL;

        return ip6_pol_route(net, table, ifindex, fl6, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
                              int ifindex)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct flowi6 fl6 = {
                .daddr          = iph->daddr,
                .saddr          = iph->saddr,
                .flowlabel      = ip6_flowinfo(iph),
                .flowi6_mark    = skb->mark,
                .flowi6_proto   = iph->nexthdr,
                .flowi6_iif     = ifindex,
        };
        struct net *net = dev_net(vrf_dev);
        struct rt6_info *rt6;

        rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
                                   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
        if (unlikely(!rt6))
                return;

        if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
                return;

        skb_dst_set(skb, &rt6->dst);
}

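/* Rx path for IPv6: make the VRF device the ingress device for packets
 * routed through the VRF, re-running packet taps once on the VRF,
 * while leaving NDISC and strict (link-local/multicast) traffic on the
 * original interface
 */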
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
                                   struct sk_buff *skb)
{
        int orig_iif = skb->skb_iif;
        bool need_strict;

        /* loopback traffic; do not push through packet taps again.
         * Reset pkt_type for upper layers to process skb
         */
        if (skb->pkt_type == PACKET_LOOPBACK) {
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;
                IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
                skb->pkt_type = PACKET_HOST;
                goto out;
        }

        /* if packet is NDISC or addressed to multicast or link-local
         * then keep the ingress interface
         */
        need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        if (!ipv6_ndisc_frame(skb) && !need_strict) {
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;

                skb_push(skb, skb->mac_len);
                dev_queue_xmit_nit(skb, vrf_dev);
                skb_pull(skb, skb->mac_len);

                IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
        }

        if (need_strict)
                vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

        skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
        return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
                                   struct sk_buff *skb)
{
        return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
                                  struct sk_buff *skb)
{
        skb->dev = vrf_dev;
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;

        /* loopback traffic; do not push through packet taps again.
         * Reset pkt_type for upper layers to process skb
         */
        if (skb->pkt_type == PACKET_LOOPBACK) {
                skb->pkt_type = PACKET_HOST;
                goto out;
        }

        skb_push(skb, skb->mac_len);
        dev_queue_xmit_nit(skb, vrf_dev);
        skb_pull(skb, skb->mac_len);

        skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
        return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
                                  struct sk_buff *skb,
                                  u16 proto)
{
        switch (proto) {
        case AF_INET:
                return vrf_ip_rcv(vrf_dev, skb);
        case AF_INET6:
                return vrf_ip6_rcv(vrf_dev, skb);
        }

        return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
                                               struct flowi6 *fl6)
{
        struct net *net = dev_net(dev);
        int flags = RT6_LOOKUP_F_IFACE;
        struct dst_entry *dst = NULL;
        struct rt6_info *rt;

        /* VRF device does not have a link-local address and
         * sending packets to link-local or mcast addresses over
         * a VRF device does not make sense
         */
        if (fl6->flowi6_oif == dev->ifindex) {
                dst = &net->ipv6.ip6_null_entry->dst;
                dst_hold(dst);
                return dst;
        }

        if (!ipv6_addr_any(&fl6->saddr))
                flags |= RT6_LOOKUP_F_HAS_SADDR;

        rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
        if (rt)
                dst = &rt->dst;

        return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
        .l3mdev_fib_table       = vrf_fib_table,
        .l3mdev_l3_rcv          = vrf_l3_rcv,
        .l3mdev_l3_out          = vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
        .l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
        .get_drvinfo    = vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
        size_t sz;

        sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
        sz += nla_total_size(sizeof(u8));       /* FRA_L3MDEV */
        sz += nla_total_size(sizeof(u32));      /* FRA_PRIORITY */

        return sz;
}

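/* install or remove the l3mdev FIB rule that directs lookups to the
 * table of the VRF owning the flow's device. The rule is built as a
 * netlink message and fed straight to fib_nl_newrule/fib_nl_delrule.
 */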
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
        struct fib_rule_hdr *frh;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        int err;

        if (family == AF_INET6 && !ipv6_mod_enabled())
                return 0;

        skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
        if (!nlh)
                goto nla_put_failure;

        /* rule only needs to appear once */
        nlh->nlmsg_flags |= NLM_F_EXCL;

        frh = nlmsg_data(nlh);
        memset(frh, 0, sizeof(*frh));
        frh->family = family;
        frh->action = FR_ACT_TO_TBL;

        if (nla_put_u8(skb, FRA_L3MDEV, 1))
                goto nla_put_failure;

        if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);

        /* fib_nl_{new,del}rule handling looks for net from skb->sk */
        skb->sk = dev_net(dev)->rtnl;
        if (add_it) {
                err = fib_nl_newrule(skb, nlh);
                if (err == -EEXIST)
                        err = 0;
        } else {
                err = fib_nl_delrule(skb, nlh);
                if (err == -ENOENT)
                        err = 0;
        }
        nlmsg_free(skb);

        return err;

nla_put_failure:
        nlmsg_free(skb);

        return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
        int err;

        err = vrf_fib_rule(dev, AF_INET,  true);
        if (err < 0)
                goto out_err;

        err = vrf_fib_rule(dev, AF_INET6, true);
        if (err < 0)
                goto ipv6_err;

        return 0;

ipv6_err:
        vrf_fib_rule(dev, AF_INET,  false);

out_err:
        netdev_err(dev, "Failed to add FIB rules.\n");
        return err;
}

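/* rtnl_link setup: ethernet-like device with a random MAC, LLTX and
 * software offload features; no qdisc by default
 */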
static void vrf_setup(struct net_device *dev)
{
        ether_setup(dev);

        /* Initialize the device structure. */
        dev->netdev_ops = &vrf_netdev_ops;
        dev->l3mdev_ops = &vrf_l3mdev_ops;
        dev->ethtool_ops = &vrf_ethtool_ops;
        dev->destructor = free_netdev;

        /* Fill in device structure with ethernet-generic values. */
        eth_hw_addr_random(dev);

        /* don't acquire vrf device's netif_tx_lock when transmitting */
        dev->features |= NETIF_F_LLTX;

        /* don't allow vrf devices to change network namespaces. */
        dev->features |= NETIF_F_NETNS_LOCAL;

        /* does not make sense for a VLAN to be added to a vrf device */
        dev->features   |= NETIF_F_VLAN_CHALLENGED;

        /* enable offload features */
        dev->features   |= NETIF_F_GSO_SOFTWARE;
        dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
        dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

        dev->hw_features = dev->features;
        dev->hw_enc_features = dev->features;

        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
        unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        struct net_vrf *vrf = netdev_priv(dev);
        int err;

        if (!data || !data[IFLA_VRF_TABLE])
                return -EINVAL;

        vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

        dev->priv_flags |= IFF_L3MDEV_MASTER;

        err = register_netdevice(dev);
        if (err)
                goto out;

        if (add_fib_rules) {
                err = vrf_add_fib_rules(dev);
                if (err) {
                        unregister_netdevice(dev);
                        goto out;
                }
                add_fib_rules = false;
        }

out:
        return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
        return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
                        const struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
                                 const struct net_device *slave_dev)
{
        return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
                               const struct net_device *vrf_dev,
                               const struct net_device *slave_dev)
{
        struct net_vrf *vrf = netdev_priv(vrf_dev);

        if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
                return -EMSGSIZE;

        return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
        [IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct net_vrf),

        .get_size       = vrf_nl_getsize,
        .policy         = vrf_nl_policy,
        .validate       = vrf_validate,
        .fill_info      = vrf_fillinfo,

        .get_slave_size  = vrf_get_slave_size,
        .fill_slave_info = vrf_fill_slave_info,

        .newlink        = vrf_newlink,
        .dellink        = vrf_dellink,
        .setup          = vrf_setup,
        .maxtype        = IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
                            unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* only care about unregister events to drop slave references */
        if (event == NETDEV_UNREGISTER) {
                struct net_device *vrf_dev;

                if (!netif_is_l3_slave(dev))
                        goto out;

                vrf_dev = netdev_master_upper_dev_get(dev);
                vrf_del_slave(vrf_dev, dev);
        }
out:
        return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
        .notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
        int rc;

        register_netdevice_notifier(&vrf_notifier_block);

        rc = rtnl_link_register(&vrf_link_ops);
        if (rc < 0)
                goto error;

        return 0;

error:
        unregister_netdevice_notifier(&vrf_notifier_block);
        return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);