linux/drivers/infiniband/sw/rxe/rxe_net.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static struct rxe_recv_sockets recv_sockets;

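/* Look up an IPv4 route for a UDP flow out of @ndev from @saddr to
 * @daddr; returns the route's dst_entry, or NULL if there is none.
 */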
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
                                  struct in_addr *saddr,
                                  struct in_addr *daddr)
{
        struct rtable *rt;
        struct flowi4 fl = { { 0 } };

        fl.flowi4_oif = ndev->ifindex;
        memcpy(&fl.saddr, saddr, sizeof(*saddr));
        memcpy(&fl.daddr, daddr, sizeof(*daddr));
        fl.flowi4_proto = IPPROTO_UDP;

        rt = ip_route_output_key(&init_net, &fl);
        if (IS_ERR(rt)) {
                pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
                return NULL;
        }

        return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
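/* Look up an IPv6 route for a UDP flow from @saddr to @daddr via the
 * ipv6 stub on the shared UDPv6 tunnel socket; returns the dst_entry,
 * or NULL if the lookup fails.
 */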
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
                                         struct in6_addr *saddr,
                                         struct in6_addr *daddr)
{
        struct dst_entry *ndst;
        struct flowi6 fl6 = { { 0 } };

        fl6.flowi6_oif = ndev->ifindex;
        memcpy(&fl6.saddr, saddr, sizeof(*saddr));
        memcpy(&fl6.daddr, daddr, sizeof(*daddr));
        fl6.flowi6_proto = IPPROTO_UDP;

        ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
                                               recv_sockets.sk6->sk, &fl6,
                                               NULL);
        if (IS_ERR(ndst)) {
                pr_err_ratelimited("no route to %pI6\n", daddr);
                return NULL;
        }

        if (unlikely(ndst->error)) {
                pr_err("no route to %pI6\n", daddr);
                goto put;
        }

        return ndst;
put:
        dst_release(ndst);
        return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
                                         struct in6_addr *saddr,
                                         struct in6_addr *daddr)
{
        return NULL;
}

#endif

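/* Return a dst_entry for the flow described by @av. RC QPs cache the
 * route on their socket; the cached entry is revalidated with
 * dst_check() against the saved cookie and refreshed on a miss.
 */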
static struct dst_entry *rxe_find_route(struct net_device *ndev,
                                        struct rxe_qp *qp,
                                        struct rxe_av *av)
{
        struct dst_entry *dst = NULL;

        if (qp_type(qp) == IB_QPT_RC)
                dst = sk_dst_get(qp->sk->sk);

        if (!dst || !dst_check(dst, qp->dst_cookie)) {
                if (dst)
                        dst_release(dst);

                if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
                        struct in_addr *saddr;
                        struct in_addr *daddr;

                        saddr = &av->sgid_addr._sockaddr_in.sin_addr;
                        daddr = &av->dgid_addr._sockaddr_in.sin_addr;
                        dst = rxe_find_route4(ndev, saddr, daddr);
                } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
                        struct in6_addr *saddr6;
                        struct in6_addr *daddr6;

                        saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
                        daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
                        dst = rxe_find_route6(ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
                        if (dst)
                                qp->dst_cookie =
                                        rt6_get_cookie((struct rt6_info *)dst);
#endif
                }

                if (dst && (qp_type(qp) == IB_QPT_RC)) {
                        dst_hold(dst);
                        sk_dst_set(qp->sk->sk, dst);
                }
        }
        return dst;
}

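/* encap_rcv handler for the RoCEv2 tunnel sockets: map the receiving
 * netdev (or its real device for VLANs) to an rxe device, fill in the
 * per-skb rxe_pkt_info and pass the packet up to rxe_rcv().
 */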
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        struct udphdr *udph;
        struct rxe_dev *rxe;
        struct net_device *ndev = skb->dev;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        /* takes a reference on rxe->ib_dev
         * drop when skb is freed
         */
        rxe = rxe_get_dev_from_net(ndev);
        if (!rxe && is_vlan_dev(ndev))
                rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
        if (!rxe)
                goto drop;

        if (skb_linearize(skb)) {
                pr_err("skb_linearize failed\n");
                ib_device_put(&rxe->ib_dev);
                goto drop;
        }

        udph = udp_hdr(skb);
        pkt->rxe = rxe;
        pkt->port_num = 1;
        pkt->hdr = (u8 *)(udph + 1);
        pkt->mask = RXE_GRH_MASK;
        pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

        rxe_rcv(skb);

        return 0;
drop:
        kfree_skb(skb);

        return 0;
}

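/* Create a kernel UDP socket listening on @port and register
 * rxe_udp_encap_recv() as its tunnel encap receive handler.
 */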
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
                                           bool ipv6)
{
        int err;
        struct socket *sock;
        struct udp_port_cfg udp_cfg = { };
        struct udp_tunnel_sock_cfg tnl_cfg = { };

        if (ipv6) {
                udp_cfg.family = AF_INET6;
                udp_cfg.ipv6_v6only = 1;
        } else {
                udp_cfg.family = AF_INET;
        }

        udp_cfg.local_udp_port = port;

        /* Create UDP socket */
        err = udp_sock_create(net, &udp_cfg, &sock);
        if (err < 0)
                return ERR_PTR(err);

        tnl_cfg.encap_type = 1;
        tnl_cfg.encap_rcv = rxe_udp_encap_recv;

        /* Setup UDP tunnel */
        setup_udp_tunnel_sock(net, sock, &tnl_cfg);

        return sock;
}

static void rxe_release_udp_tunnel(struct socket *sk)
{
        if (sk)
                udp_tunnel_sock_release(sk);
}

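/* Push a UDP header in front of the payload and fill it in. The UDP
 * checksum is left at zero; packet integrity is covered by the RoCE
 * ICRC instead.
 */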
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
                            __be16 dst_port)
{
        struct udphdr *udph;

        __skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);

        udph->dest = dst_port;
        udph->source = src_port;
        udph->len = htons(skb->len);
        udph->check = 0;
}

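/* Attach @dst to the skb and push a fully initialized IPv4 header in
 * front of the UDP header.
 */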
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
                             __be32 saddr, __be32 daddr, __u8 proto,
                             __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        struct iphdr *iph;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash(skb);
        skb_dst_set(skb, dst_clone(dst));
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version    =       IPVERSION;
        iph->ihl        =       sizeof(struct iphdr) >> 2;
        iph->tot_len    =       htons(skb->len);
        iph->frag_off   =       df;
        iph->protocol   =       proto;
        iph->tos        =       tos;
        iph->daddr      =       daddr;
        iph->saddr      =       saddr;
        iph->ttl        =       ttl;
        __ip_select_ident(dev_net(dst->dev), iph,
                          skb_shinfo(skb)->gso_segs ?: 1);
}

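/* Attach @dst to the skb and push a fully initialized IPv6 header in
 * front of the UDP header.
 */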
static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
                             struct in6_addr *saddr, struct in6_addr *daddr,
                             __u8 proto, __u8 prio, __u8 ttl)
{
        struct ipv6hdr *ip6h;

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                            | IPSKB_REROUTED);
        skb_dst_set(skb, dst_clone(dst));

        __skb_push(skb, sizeof(*ip6h));
        skb_reset_network_header(skb);
        ip6h              = ipv6_hdr(skb);
        ip6_flow_hdr(ip6h, prio, htonl(0));
        ip6h->nexthdr     = proto;
        ip6h->hop_limit   = ttl;
        ip6h->daddr       = *daddr;
        ip6h->saddr       = *saddr;
        ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}

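/* Resolve a route for @av and prepend the UDP and IPv4 headers of an
 * outgoing RoCEv2 packet.
 */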
static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
                    struct sk_buff *skb)
{
        struct rxe_qp *qp = pkt->qp;
        struct dst_entry *dst;
        bool xnet = false;
        __be16 df = htons(IP_DF);
        struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
        struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

        dst = rxe_find_route(skb->dev, qp, av);
        if (!dst) {
                pr_err("Host not reachable\n");
                return -EHOSTUNREACH;
        }

        prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
                        cpu_to_be16(ROCE_V2_UDP_DPORT));

        prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
                         av->grh.traffic_class, av->grh.hop_limit, df, xnet);

        dst_release(dst);
        return 0;
}

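/* Resolve a route for @av and prepend the UDP and IPv6 headers of an
 * outgoing RoCEv2 packet.
 */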
static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
                    struct sk_buff *skb)
{
        struct rxe_qp *qp = pkt->qp;
        struct dst_entry *dst;
        struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
        struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

        dst = rxe_find_route(skb->dev, qp, av);
        if (!dst) {
                pr_err("Host not reachable\n");
                return -EHOSTUNREACH;
        }

        prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
                        cpu_to_be16(ROCE_V2_UDP_DPORT));

        prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
                         av->grh.traffic_class,
                         av->grh.hop_limit);

        dst_release(dst);
        return 0;
}

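/* Build the network headers for an outgoing packet and flag it for
 * loopback when the destination MAC address is the device's own.
 */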
int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
                struct sk_buff *skb)
{
        int err = 0;

        if (skb->protocol == htons(ETH_P_IP))
                err = prepare4(av, pkt, skb);
        else if (skb->protocol == htons(ETH_P_IPV6))
                err = prepare6(av, pkt, skb);

        if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
                pkt->mask |= RXE_LOOPBACK_MASK;

        return err;
}

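/* skb destructor for transmitted packets: drop the in-flight count
 * and the QP reference taken in rxe_send(), and restart the requester
 * if it stalled waiting for skbs to drain.
 */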
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rxe_qp *qp = sk->sk_user_data;
        int skb_out = atomic_dec_return(&qp->skb_out);

        if (unlikely(qp->need_req_skb &&
                     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
                rxe_run_task(&qp->req.task, 1);

        rxe_put(qp);
}

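/* Hand a prepared packet to the IPv4/IPv6 output path. The QP
 * reference and skb_out count taken here are released by
 * rxe_skb_tx_dtor() when the skb is freed.
 */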
static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
        int err;

        skb->destructor = rxe_skb_tx_dtor;
        skb->sk = pkt->qp->sk->sk;

        rxe_get(pkt->qp);
        atomic_inc(&pkt->qp->skb_out);

        if (skb->protocol == htons(ETH_P_IP)) {
                err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        } else {
                pr_err("Unknown layer 3 protocol: 0x%04x\n",
                       ntohs(skb->protocol));
                atomic_dec(&pkt->qp->skb_out);
                rxe_put(pkt->qp);
                kfree_skb(skb);
                return -EINVAL;
        }

        if (unlikely(net_xmit_eval(err))) {
                pr_debug("error sending packet: %d\n", err);
                return -EAGAIN;
        }

        return 0;
}

/* fix up a send packet to match the packets
 * received from UDP before looping them back
 */
static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
        memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));

        if (skb->protocol == htons(ETH_P_IP))
                skb_pull(skb, sizeof(struct iphdr));
        else
                skb_pull(skb, sizeof(struct ipv6hdr));

        if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
                kfree_skb(skb);
                return -EIO;
        }

        rxe_rcv(skb);

        return 0;
}

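/* Transmit or loop back a packet for @qp. Updates the send counters
 * and, for non-RC QPs, marks the WQE done and kicks the completion
 * task once the last packet of a request has gone out.
 */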
int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                    struct sk_buff *skb)
{
        int err;
        int is_request = pkt->mask & RXE_REQ_MASK;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if ((is_request && (qp->req.state != QP_STATE_READY)) ||
            (!is_request && (qp->resp.state != QP_STATE_READY))) {
                pr_info("Packet dropped. QP is not in ready state\n");
                goto drop;
        }

        rxe_icrc_generate(skb, pkt);

        if (pkt->mask & RXE_LOOPBACK_MASK)
                err = rxe_loopback(skb, pkt);
        else
                err = rxe_send(skb, pkt);
        if (err) {
                rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
                return err;
        }

        if ((qp_type(qp) != IB_QPT_RC) &&
            (pkt->mask & RXE_END_MASK)) {
                pkt->wqe->state = wqe_state_done;
                rxe_run_task(&qp->comp.task, 1);
        }

        rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
        goto done;

drop:
        kfree_skb(skb);
        err = 0;
done:
        return err;
}

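/* Allocate an skb with room for the link, IP, UDP and RoCE headers
 * plus @paylen bytes of payload, and initialize the matching
 * rxe_pkt_info. Returns NULL on failure.
 */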
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
                                int paylen, struct rxe_pkt_info *pkt)
{
        unsigned int hdr_len;
        struct sk_buff *skb = NULL;
        struct net_device *ndev;
        const struct ib_gid_attr *attr;
        const int port_num = 1;

        attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
        if (IS_ERR(attr))
                return NULL;

        if (av->network_type == RXE_NETWORK_TYPE_IPV4)
                hdr_len = ETH_HLEN + sizeof(struct udphdr) +
                        sizeof(struct iphdr);
        else
                hdr_len = ETH_HLEN + sizeof(struct udphdr) +
                        sizeof(struct ipv6hdr);

        rcu_read_lock();
        ndev = rdma_read_gid_attr_ndev_rcu(attr);
        if (IS_ERR(ndev)) {
                rcu_read_unlock();
                goto out;
        }
        skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
                        GFP_ATOMIC);

        if (unlikely(!skb)) {
                rcu_read_unlock();
                goto out;
        }

        skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));

        /* FIXME: hold reference to this netdev until life of this skb. */
        skb->dev        = ndev;
        rcu_read_unlock();

        if (av->network_type == RXE_NETWORK_TYPE_IPV4)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);

        pkt->rxe        = rxe;
        pkt->port_num   = port_num;
        pkt->hdr        = skb_put(skb, paylen);
        pkt->mask       |= RXE_GRH_MASK;

out:
        rdma_put_gid_attr(attr);
        return skb;
}

/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
        return rxe->ndev->name;
}

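/* Allocate an rxe device on top of @ndev and register it as
 * @ibdev_name.
 */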
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
        int err;
        struct rxe_dev *rxe = NULL;

        rxe = ib_alloc_device(rxe_dev, ib_dev);
        if (!rxe)
                return -ENOMEM;

        rxe->ndev = ndev;

        err = rxe_add(rxe, ndev->mtu, ibdev_name);
        if (err) {
                ib_dealloc_device(&rxe->ib_dev);
                return err;
        }

        return 0;
}

static void rxe_port_event(struct rxe_dev *rxe,
                           enum ib_event_type event)
{
        struct ib_event ev;

        ev.device = &rxe->ib_dev;
        ev.element.port_num = 1;
        ev.event = event;

        ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
        struct rxe_port *port;

        port = &rxe->port;
        port->attr.state = IB_PORT_ACTIVE;

        rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
        dev_info(&rxe->ib_dev.dev, "set active\n");
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
        struct rxe_port *port;

        port = &rxe->port;
        port->attr.state = IB_PORT_DOWN;

        rxe_port_event(rxe, IB_EVENT_PORT_ERR);
        rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
        dev_info(&rxe->ib_dev.dev, "set down\n");
}

void rxe_set_port_state(struct rxe_dev *rxe)
{
        if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
                rxe_port_up(rxe);
        else
                rxe_port_down(rxe);
}

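/* netdevice notifier: mirror state changes of the underlying Ethernet
 * device onto the rxe device.
 */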
static int rxe_notify(struct notifier_block *not_blk,
                      unsigned long event,
                      void *arg)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(arg);
        struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);

        if (!rxe)
                return NOTIFY_OK;

        switch (event) {
        case NETDEV_UNREGISTER:
                ib_unregister_device_queued(&rxe->ib_dev);
                break;
        case NETDEV_UP:
                rxe_port_up(rxe);
                break;
        case NETDEV_DOWN:
                rxe_port_down(rxe);
                break;
        case NETDEV_CHANGEMTU:
                pr_info("%s changed mtu to %u\n", ndev->name, ndev->mtu);
                rxe_set_mtu(rxe, ndev->mtu);
                break;
        case NETDEV_CHANGE:
                rxe_set_port_state(rxe);
                break;
        case NETDEV_REBOOT:
        case NETDEV_GOING_DOWN:
        case NETDEV_CHANGEADDR:
        case NETDEV_CHANGENAME:
        case NETDEV_FEAT_CHANGE:
        default:
                pr_info("ignoring netdev event = %ld for %s\n",
                        event, ndev->name);
                break;
        }

        ib_device_put(&rxe->ib_dev);
        return NOTIFY_OK;
}

static struct notifier_block rxe_net_notifier = {
        .notifier_call = rxe_notify,
};

static int rxe_net_ipv4_init(void)
{
        recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
                                htons(ROCE_V2_UDP_DPORT), false);
        if (IS_ERR(recv_sockets.sk4)) {
                recv_sockets.sk4 = NULL;
                pr_err("Failed to create IPv4 UDP tunnel\n");
                return -1;
        }

        return 0;
}

static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)

        recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
                                                htons(ROCE_V2_UDP_DPORT), true);
        if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
                recv_sockets.sk6 = NULL;
                pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
                return 0;
        }

        if (IS_ERR(recv_sockets.sk6)) {
                recv_sockets.sk6 = NULL;
                pr_err("Failed to create IPv6 UDP tunnel\n");
                return -1;
        }
#endif
        return 0;
}

void rxe_net_exit(void)
{
        rxe_release_udp_tunnel(recv_sockets.sk6);
        rxe_release_udp_tunnel(recv_sockets.sk4);
        unregister_netdevice_notifier(&rxe_net_notifier);
}

int rxe_net_init(void)
{
        int err;

        recv_sockets.sk6 = NULL;

        err = rxe_net_ipv4_init();
        if (err)
                return err;
        err = rxe_net_ipv6_init();
        if (err)
                goto err_out;
        err = register_netdevice_notifier(&rxe_net_notifier);
        if (err) {
                pr_err("Failed to register netdev notifier\n");
                goto err_out;
        }
        return 0;
err_out:
        rxe_net_exit();
        return err;
}