linux/net/openvswitch/datapath.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

unsigned int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, info, 0, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath, port, set operations on
 * vports, etc.) and writes to other state (flow table modifications, set
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
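
/*
 * Illustrative sketch (not extra datapath code): a write-side update takes
 * ovs_lock() around the modification, and any RTNL-protected work nests
 * inside it, per the lock ordering documented above:
 *
 *	ovs_lock();
 *	rtnl_lock();
 *	...modify datapath/vport state...
 *	rtnl_unlock();
 *	ovs_unlock();
 */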

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif
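
/*
 * For reference: lockdep_ovsl_is_held() backs the ovsl_dereference() and
 * ASSERT_OVSL() helpers in datapath.h, e.g. (sketch):
 *
 *	WARN_ON(!lockdep_ovsl_is_held());
 *
 * With lockdep disabled it conservatively returns 1 so those checks never
 * fire.
 */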

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *,
			     uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *,
				  uint32_t cutlen);

static void ovs_dp_masks_rebalance(struct work_struct *work);

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);

	return ovs_vport_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp->ports);
	ovs_meters_exit(dp);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
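
/*
 * Worked example (assuming DP_VPORT_HASH_BUCKETS is a power of two, as
 * datapath.h defines it): "port_no & (DP_VPORT_HASH_BUCKETS - 1)" is the
 * cheap form of "port_no % DP_VPORT_HASH_BUCKETS". With 1024 buckets,
 * port_no 1030 maps to bucket 1030 & 1023 = 6.
 */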

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node,
				 lockdep_ovsl_is_held()) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;
	u32 n_cache_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
					 &n_mask_hit, &n_cache_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.mru = OVS_CB(skb)->mru;
		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	error = ovs_execute_actions(dp, skb, sf_acts, key);
	if (unlikely(error))
		net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
				    ovs_dp_name(dp), error);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	stats->n_cache_hit += n_cache_hit;
	u64_stats_update_end(&stats->syncp);
}
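
/*
 * Summary of the receive path above (illustrative, not extra code): a flow
 * table hit executes the cached actions under RCU (fast path); a miss hands
 * the packet to userspace as an OVS_PACKET_CMD_MISS upcall (slow path), and
 * userspace is then expected to install a flow so that subsequent packets
 * hit in the kernel.
 */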

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info,
		  uint32_t cutlen)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info,
			     uint32_t cutlen)
{
	unsigned int gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb_list_walk_safe(segs, skb, nskb) {
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
		if (err)
			break;
	}

	/* Free all of the segments. */
	skb_list_walk_safe(segs, skb, nskb) {
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return err;
}

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen, int actions_attrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
		+ nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
		+ nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(actions_attrlen);

	/* OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru)
		size += nla_total_size(sizeof(upcall_info->mru));

	return size;
}
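
/*
 * Worked example of the attribute sizing above: nla_total_size(payload)
 * is NLA_ALIGN(NLA_HDRLEN + payload) with NLA_HDRLEN == 4, so e.g. the
 * OVS_PACKET_ATTR_HASH attribute costs NLA_ALIGN(4 + 8) = 12 bytes, and
 * a 61-byte packet payload costs NLA_ALIGN(4 + 61) = 68 bytes.
 */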

static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(skb->len) - skb->len;

		if (plen > 0)
			skb_put_zero(skb, plen);
	}
}
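
/*
 * Worked example: NLA_ALIGN() rounds up to a multiple of 4, so for
 * skb->len == 61 the padding added is NLA_ALIGN(61) - 61 = 64 - 61 = 3
 * zero bytes; an already-aligned length gets no padding.
 */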

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info,
				  uint32_t cutlen)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;
	u64 hash;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_csum_hwoffload_help(skb, 0)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen - cutlen,
			      OVS_CB(skb)->acts_origlen);
	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	if (!upcall) {
		err = -EINVAL;
		goto out;
	}
	upcall->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
	if (err)
		goto out;

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start_noflag(user_skb,
					    OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		if (!nla) {
			err = -EMSGSIZE;
			goto out;
		}
		err = ovs_nla_put_tunnel_info(user_skb,
					      upcall_info->egress_tun_info);
		if (err)
			goto out;

		nla_nest_end(user_skb, nla);
	}

	if (upcall_info->actions_len) {
		nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
		if (!nla) {
			err = -EMSGSIZE;
			goto out;
		}
		err = ovs_nla_put_actions(upcall_info->actions,
					  upcall_info->actions_len,
					  user_skb);
		if (!err)
			nla_nest_end(user_skb, nla);
		else
			nla_nest_cancel(user_skb, nla);
	}

	/* Add OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru &&
	    nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
		err = -ENOBUFS;
		goto out;
	}

	/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
	if (cutlen > 0 &&
	    nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
		err = -ENOBUFS;
		goto out;
	}

	/* Add OVS_PACKET_ATTR_HASH */
	hash = skb_get_hash_raw(skb);
	if (skb->sw_hash)
		hash |= OVS_PACKET_HASH_SW_BIT;

	if (skb->l4_hash)
		hash |= OVS_PACKET_HASH_L4_BIT;

	if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
		err = -ENOBUFS;
		goto out;
	}
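
	/* The u64 written above packs skb->hash in the low 32 bits, with
	 * the OVS_PACKET_HASH_SW_BIT and OVS_PACKET_HASH_L4_BIT flags above
	 * them; ovs_packet_cmd_execute() below reverses the packing with
	 * "hash & 0xFFFFFFFFULL" plus the two flag tests.
	 */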

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy() */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len - cutlen);

	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	pad_packet(dp, user_skb);

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}
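
/*
 * For reference, the upcall message assembled above has roughly this
 * layout (optional attributes only when set):
 *
 *	struct nlmsghdr / struct genlmsghdr
 *	struct ovs_header		(dp_ifindex)
 *	OVS_PACKET_ATTR_KEY		(nested flow key)
 *	OVS_PACKET_ATTR_USERDATA	(optional)
 *	OVS_PACKET_ATTR_EGRESS_TUN_KEY	(optional, nested)
 *	OVS_PACKET_ATTR_ACTIONS		(optional, nested)
 *	OVS_PACKET_ATTR_MRU		(optional)
 *	OVS_PACKET_ATTR_LEN		(only when truncated)
 *	OVS_PACKET_ATTR_HASH
 *	OVS_PACKET_ATTR_PACKET		(packet data, zero-copied when possible)
 */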

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct vport *input_vport;
	u16 mru = 0;
	u64 hash;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	/* Set packet's mru */
	if (a[OVS_PACKET_ATTR_MRU]) {
		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
		packet->ignore_df = 1;
	}
	OVS_CB(packet)->mru = mru;

	if (a[OVS_PACKET_ATTR_HASH]) {
		hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);

		__skb_set_hash(packet, hash & 0xFFFFFFFFULL,
			       !!(hash & OVS_PACKET_HASH_SW_BIT),
			       !!(hash & OVS_PACKET_HASH_L4_BIT));
	}

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
					     packet, &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	packet->dev = input_vport->dev;
	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
	[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};

static const struct genl_small_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.policy = packet_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_packet_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_packet_genl_ops),
	.module = THIS_MODULE,
};

static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

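		/* Snapshot this CPU's counters under the u64_stats seqcount:
		 * retry if a writer raced with the read, so the 64-bit
		 * counters are fetched consistently even on 32-bit kernels.
		 */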
		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
		mega_stats->n_cache_hit += local_stats.n_cache_hit;
	}
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback,
	 * see ovs_nla_put_identifier().
	 */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);
	else
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->orig_len);

	return len
		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
			      OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
			  sizeof(struct ovs_flow_stats), &stats,
			  OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	if (WARN_ON_ONCE(retval < 0)) {
		kfree_skb(skb);
		skb = ERR_PTR(retval);
	}
	return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow = NULL, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->key, false, &mask);
	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	/* Extract flow identifier. */
	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
				       &new_flow->key, log);
	if (error)
		goto err_kfree_flow;

	/* unmasked key is needed to match when ufid is not used. */
	if (ovs_identifier_is_key(&new_flow->id))
		match.key = new_flow->id.unmasked_key;

	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);

	/* Validate actions. */
	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
				     &new_flow->key, &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
					ufid_flags);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}

	/* Check if this is a duplicate flow */
	if (ovs_identifier_is_ufid(&new_flow->id))
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
	if (!flow)
		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The flow identifier has to be the same for flow updates.
		 * Look for any overlapping flow.
		 */
		if (unlikely(!ovs_flow_cmp(flow, &match))) {
			if (ovs_identifier_is_key(&flow->id))
				flow = ovs_flow_tbl_lookup_exact(&dp->table,
								 &match);
			else /* UFID matches but key is different */
				flow = NULL;
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
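		/* Replacing the actions follows the usual RCU
		 * publish/retire pattern: rcu_assign_pointer() publishes
		 * the new actions and the old ones are only freed after a
		 * grace period (ovs_nla_free_flow_actions_rcu() below), so
		 * concurrent readers under rcu_read_lock() stay safe.
		 */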
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions_rcu(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack
struct sw_flow_actions *get_flow_actions(struct net *net,
					 const struct nlattr *a,
					 const struct sw_flow_key *key,
					 const struct sw_flow_mask *mask,
					 bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, true, mask);
	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

/* Factor out match-init and action-copy to avoid a
 * "Wframe-larger-than=1024" warning. Because the mask is only used
 * to get the actions, we add a separate function to save some
 * stack space.
 *
 * If neither the key nor the action attrs are present, we return 0
 * directly; in that case the caller will not use the match either.
 * If the action attr is present, we try to get the actions and save
 * them to *acts. Before returning, we reset the match->mask pointer,
 * because we must not return a match object with a dangling reference
 * to the on-stack mask.
 */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
			      struct sw_flow_match *match,
			      struct sw_flow_key *key,
			      struct nlattr **a,
			      struct sw_flow_actions **acts,
			      bool log)
{
	struct sw_flow_mask mask;
	int error = 0;

	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(match, key, true, &mask);
		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
					  a[OVS_FLOW_ATTR_MASK], log);
		if (error)
			goto error;
	}

	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		if (!a[OVS_FLOW_ATTR_KEY]) {
			OVS_NLERR(log,
				  "Flow key attribute not present in set flow.");
			error = -EINVAL;
			goto error;
		}

		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
					 &mask, log);
		if (IS_ERR(*acts)) {
			error = PTR_ERR(*acts);
			goto error;
		}
	}

	/* On success, error is 0. */
error:
	match->mask = NULL;
	return error;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
		OVS_NLERR(log,
			  "Flow set message rejected, Key attribute missing.");
		return -EINVAL;
	}

	error = ovs_nla_init_match_and_action(net, &match, &key, a,
					      &acts, log);
	if (error)
		goto error;

	if (acts) {
		/* Can allocate the reply before locking if we have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (likely(acts)) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_SET,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_SET, false,
						ufid_flags);

		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions_rcu(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
error:
	return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_GET, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
					NULL, log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
					&flow->id, info, false, ufid_flags);
	if (likely(reply)) {
		if (!IS_ERR(reply)) {
			rcu_read_lock();	/* To keep RCU checker happy. */
1374                        err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1375                                                     reply, info->snd_portid,
1376                                                     info->snd_seq, 0,
1377                                                     OVS_FLOW_CMD_DEL,
1378                                                     ufid_flags);
1379                        rcu_read_unlock();
1380                        if (WARN_ON_ONCE(err < 0)) {
1381                                kfree_skb(reply);
1382                                goto out_free;
1383                        }
1384
1385                        ovs_notify(&dp_flow_genl_family, reply, info);
1386                } else {
1387                        netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
1388                                        PTR_ERR(reply));
1389                }
1390        }
1391
1392out_free:
1393        ovs_flow_free(flow, true);
1394        return 0;
1395unlock:
1396        ovs_unlock();
1397        return err;
1398}
1399
1400static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1401{
1402        struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1403        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1404        struct table_instance *ti;
1405        struct datapath *dp;
1406        u32 ufid_flags;
1407        int err;
1408
1409        err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
1410                                       OVS_FLOW_ATTR_MAX, flow_policy, NULL);
1411        if (err)
1412                return err;
1413        ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1414
1415        rcu_read_lock();
1416        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1417        if (!dp) {
1418                rcu_read_unlock();
1419                return -ENODEV;
1420        }
1421
1422        ti = rcu_dereference(dp->table.ti);
1423        for (;;) {
1424                struct sw_flow *flow;
1425                u32 bucket, obj;
1426
1427                bucket = cb->args[0];
1428                obj = cb->args[1];
1429                flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1430                if (!flow)
1431                        break;
1432
1433                if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1434                                           NETLINK_CB(cb->skb).portid,
1435                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
1436                                           OVS_FLOW_CMD_GET, ufid_flags) < 0)
1437                        break;
1438
1439                cb->args[0] = bucket;
1440                cb->args[1] = obj;
1441        }
1442        rcu_read_unlock();
1443        return skb->len;
1444}
1445
1446static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1447        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1448        [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1449        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1450        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1451        [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1452        [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1453        [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1454};
1455
1456static const struct genl_small_ops dp_flow_genl_ops[] = {
1457        { .cmd = OVS_FLOW_CMD_NEW,
1458          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1459          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1460          .doit = ovs_flow_cmd_new
1461        },
1462        { .cmd = OVS_FLOW_CMD_DEL,
1463          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1464          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1465          .doit = ovs_flow_cmd_del
1466        },
1467        { .cmd = OVS_FLOW_CMD_GET,
1468          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1469          .flags = 0,               /* OK for unprivileged users. */
1470          .doit = ovs_flow_cmd_get,
1471          .dumpit = ovs_flow_cmd_dump
1472        },
1473        { .cmd = OVS_FLOW_CMD_SET,
1474          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1475          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1476          .doit = ovs_flow_cmd_set,
1477        },
1478};
1479
1480static struct genl_family dp_flow_genl_family __ro_after_init = {
1481        .hdrsize = sizeof(struct ovs_header),
1482        .name = OVS_FLOW_FAMILY,
1483        .version = OVS_FLOW_VERSION,
1484        .maxattr = OVS_FLOW_ATTR_MAX,
1485        .policy = flow_policy,
1486        .netnsok = true,
1487        .parallel_ops = true,
1488        .small_ops = dp_flow_genl_ops,
1489        .n_small_ops = ARRAY_SIZE(dp_flow_genl_ops),
1490        .mcgrps = &ovs_dp_flow_multicast_group,
1491        .n_mcgrps = 1,
1492        .module = THIS_MODULE,
1493};
1494
1495static size_t ovs_dp_cmd_msg_size(void)
1496{
1497        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1498
1499        msgsize += nla_total_size(IFNAMSIZ);
1500        msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1501        msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1502        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1503        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
1504
1505        return msgsize;
1506}
1507
1508/* Called with ovs_mutex. */
1509static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1510                                u32 portid, u32 seq, u32 flags, u8 cmd)
1511{
1512        struct ovs_header *ovs_header;
1513        struct ovs_dp_stats dp_stats;
1514        struct ovs_dp_megaflow_stats dp_megaflow_stats;
1515        int err;
1516
1517        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1518                                 flags, cmd);
1519        if (!ovs_header)
1520                goto error;
1521
1522        ovs_header->dp_ifindex = get_dpifindex(dp);
1523
1524        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1525        if (err)
1526                goto nla_put_failure;
1527
1528        get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1529        if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1530                          &dp_stats, OVS_DP_ATTR_PAD))
1531                goto nla_put_failure;
1532
1533        if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1534                          sizeof(struct ovs_dp_megaflow_stats),
1535                          &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1536                goto nla_put_failure;
1537
1538        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1539                goto nla_put_failure;
1540
1541        if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
1542                        ovs_flow_tbl_masks_cache_size(&dp->table)))
1543                goto nla_put_failure;
1544
1545        genlmsg_end(skb, ovs_header);
1546        return 0;
1547
1548nla_put_failure:
1549        genlmsg_cancel(skb, ovs_header);
1550error:
1551        return -EMSGSIZE;
1552}
1553
1554static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1555{
1556        return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1557}
1558
1559/* Called with rcu_read_lock or ovs_mutex. */
1560static struct datapath *lookup_datapath(struct net *net,
1561                                        const struct ovs_header *ovs_header,
1562                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1563{
1564        struct datapath *dp;
1565
1566        if (!a[OVS_DP_ATTR_NAME]) {
1567                dp = get_dp(net, ovs_header->dp_ifindex);
1568        } else {
1569                struct vport *vport;
1570
1571                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1572                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1573        }
1574        return dp ? dp : ERR_PTR(-ENODEV);
1575}
1576
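    /* Reset the announced user features of an existing datapath. Called
     * from the OVS_DP_CMD_NEW error path, under ovs_lock, when an
     * outdated userspace collides with an already created datapath.
     */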
1577static void ovs_dp_reset_user_features(struct sk_buff *skb,
1578                                       struct genl_info *info)
1579{
1580        struct datapath *dp;
1581
1582        dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
1583                             info->attrs);
1584        if (IS_ERR(dp))
1585                return;
1586
1587        WARN(dp->user_features, "Dropping previously announced user features\n");
1588        dp->user_features = 0;
1589}
1590
1591DEFINE_STATIC_KEY_FALSE(tc_recirc_sharing_support);
1592
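    /* Apply OVS_DP_ATTR_USER_FEATURES and OVS_DP_ATTR_MASKS_CACHE_SIZE
     * from a NEW or SET request to the datapath. Called with ovs_lock.
     */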
1593static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1594{
1595        u32 user_features = 0;
1596
1597        if (a[OVS_DP_ATTR_USER_FEATURES]) {
1598                user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1599
1600                if (user_features & ~(OVS_DP_F_VPORT_PIDS |
1601                                      OVS_DP_F_UNALIGNED |
1602                                      OVS_DP_F_TC_RECIRC_SHARING))
1603                        return -EOPNOTSUPP;
1604
1605#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1606                if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
1607                        return -EOPNOTSUPP;
1608#endif
1609        }
1610
1611        if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
1612                int err;
1613                u32 cache_size;
1614
1615                cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
1616                err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
1617                if (err)
1618                        return err;
1619        }
1620
1621        dp->user_features = user_features;
1622
1623        if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
1624                static_branch_enable(&tc_recirc_sharing_support);
1625        else
1626                static_branch_disable(&tc_recirc_sharing_support);
1627
1628        return 0;
1629}
1630
1631static int ovs_dp_stats_init(struct datapath *dp)
1632{
1633        dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1634        if (!dp->stats_percpu)
1635                return -ENOMEM;
1636
1637        return 0;
1638}
1639
1640static int ovs_dp_vport_init(struct datapath *dp)
1641{
1642        int i;
1643
1644        dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1645                                  sizeof(struct hlist_head),
1646                                  GFP_KERNEL);
1647        if (!dp->ports)
1648                return -ENOMEM;
1649
1650        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1651                INIT_HLIST_HEAD(&dp->ports[i]);
1652
1653        return 0;
1654}
1655
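    /* Create a datapath together with its OVSP_LOCAL internal vport.
     * Everything before ovs_lock() below only touches the not yet
     * published dp, so table, stats, ports and meters need no locking.
     */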
1656static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1657{
1658        struct nlattr **a = info->attrs;
1659        struct vport_parms parms;
1660        struct sk_buff *reply;
1661        struct datapath *dp;
1662        struct vport *vport;
1663        struct ovs_net *ovs_net;
1664        int err;
1665
1666        err = -EINVAL;
1667        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1668                goto err;
1669
1670        reply = ovs_dp_cmd_alloc_info();
1671        if (!reply)
1672                return -ENOMEM;
1673
1674        err = -ENOMEM;
1675        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1676        if (!dp)
1677                goto err_destroy_reply;
1678
1679        ovs_dp_set_net(dp, sock_net(skb->sk));
1680
1681        /* Allocate table. */
1682        err = ovs_flow_tbl_init(&dp->table);
1683        if (err)
1684                goto err_destroy_dp;
1685
1686        err = ovs_dp_stats_init(dp);
1687        if (err)
1688                goto err_destroy_table;
1689
1690        err = ovs_dp_vport_init(dp);
1691        if (err)
1692                goto err_destroy_stats;
1693
1694        err = ovs_meters_init(dp);
1695        if (err)
1696                goto err_destroy_ports;
1697
1698        /* Set up our datapath device. */
1699        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1700        parms.type = OVS_VPORT_TYPE_INTERNAL;
1701        parms.options = NULL;
1702        parms.dp = dp;
1703        parms.port_no = OVSP_LOCAL;
1704        parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1705
1706        /* So far only local changes have been made; now we need the lock. */
1707        ovs_lock();
1708
1709        err = ovs_dp_change(dp, a);
1710        if (err)
1711                goto err_unlock_and_destroy_meters;
1712
1713        vport = new_vport(&parms);
1714        if (IS_ERR(vport)) {
1715                err = PTR_ERR(vport);
1716                if (err == -EBUSY)
1717                        err = -EEXIST;
1718
1719                if (err == -EEXIST) {
1720                        /* An outdated user space instance that does not understand
1721                         * the concept of user_features has attempted to create a new
1722                         * datapath and is likely to reuse it. Drop all user features.
1723                         */
1724                        if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1725                                ovs_dp_reset_user_features(skb, info);
1726                }
1727
1728                goto err_unlock_and_destroy_meters;
1729        }
1730
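            /* The reply skb was sized by ovs_dp_cmd_msg_size(), so filling
             * it can only fail on a kernel bug.
             */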
1731        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1732                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
1733        BUG_ON(err < 0);
1734
1735        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1736        list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1737
1738        ovs_unlock();
1739
1740        ovs_notify(&dp_datapath_genl_family, reply, info);
1741        return 0;
1742
1743err_unlock_and_destroy_meters:
1744        ovs_unlock();
1745        ovs_meters_exit(dp);
1746err_destroy_ports:
1747        kfree(dp->ports);
1748err_destroy_stats:
1749        free_percpu(dp->stats_percpu);
1750err_destroy_table:
1751        ovs_flow_tbl_destroy(&dp->table);
1752err_destroy_dp:
1753        kfree(dp);
1754err_destroy_reply:
1755        kfree_skb(reply);
1756err:
1757        return err;
1758}
1759
1760/* Called with ovs_mutex. */
1761static void __dp_destroy(struct datapath *dp)
1762{
1763        struct flow_table *table = &dp->table;
1764        int i;
1765
1766        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1767                struct vport *vport;
1768                struct hlist_node *n;
1769
1770                hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1771                        if (vport->port_no != OVSP_LOCAL)
1772                                ovs_dp_detach_port(vport);
1773        }
1774
1775        list_del_rcu(&dp->list_node);
1776
1777        /* OVSP_LOCAL is the datapath's internal port; all other ports must
1778         * be destroyed before it is detached and the datapath is freed.
1779         */
1780        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1781
1782        /* Flush the sw_flows from the tables here: the RCU callback only
1783         * releases resources such as the dp, ports and tables, so flushing
1784         * now avoids issues such as RCU usage warnings.
1785         */
1786        table_instance_flow_flush(table, ovsl_dereference(table->ti),
1787                                  ovsl_dereference(table->ufid_ti));
1788
1789        /* RCU destroy the ports, meters and flow tables. */
1790        call_rcu(&dp->rcu, destroy_dp_rcu);
1791}
1792
1793static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1794{
1795        struct sk_buff *reply;
1796        struct datapath *dp;
1797        int err;
1798
1799        reply = ovs_dp_cmd_alloc_info();
1800        if (!reply)
1801                return -ENOMEM;
1802
1803        ovs_lock();
1804        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1805        err = PTR_ERR(dp);
1806        if (IS_ERR(dp))
1807                goto err_unlock_free;
1808
1809        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1810                                   info->snd_seq, 0, OVS_DP_CMD_DEL);
1811        BUG_ON(err < 0);
1812
1813        __dp_destroy(dp);
1814        ovs_unlock();
1815
1816        ovs_notify(&dp_datapath_genl_family, reply, info);
1817
1818        return 0;
1819
1820err_unlock_free:
1821        ovs_unlock();
1822        kfree_skb(reply);
1823        return err;
1824}
1825
1826static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1827{
1828        struct sk_buff *reply;
1829        struct datapath *dp;
1830        int err;
1831
1832        reply = ovs_dp_cmd_alloc_info();
1833        if (!reply)
1834                return -ENOMEM;
1835
1836        ovs_lock();
1837        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1838        err = PTR_ERR(dp);
1839        if (IS_ERR(dp))
1840                goto err_unlock_free;
1841
1842        err = ovs_dp_change(dp, info->attrs);
1843        if (err)
1844                goto err_unlock_free;
1845
1846        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1847                                   info->snd_seq, 0, OVS_DP_CMD_SET);
1848        BUG_ON(err < 0);
1849
1850        ovs_unlock();
1851        ovs_notify(&dp_datapath_genl_family, reply, info);
1852
1853        return 0;
1854
1855err_unlock_free:
1856        ovs_unlock();
1857        kfree_skb(reply);
1858        return err;
1859}
1860
1861static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1862{
1863        struct sk_buff *reply;
1864        struct datapath *dp;
1865        int err;
1866
1867        reply = ovs_dp_cmd_alloc_info();
1868        if (!reply)
1869                return -ENOMEM;
1870
1871        ovs_lock();
1872        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1873        if (IS_ERR(dp)) {
1874                err = PTR_ERR(dp);
1875                goto err_unlock_free;
1876        }
1877        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1878                                   info->snd_seq, 0, OVS_DP_CMD_GET);
1879        BUG_ON(err < 0);
1880        ovs_unlock();
1881
1882        return genlmsg_reply(reply, info);
1883
1884err_unlock_free:
1885        ovs_unlock();
1886        kfree_skb(reply);
1887        return err;
1888}
1889
1890static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1891{
1892        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1893        struct datapath *dp;
1894        int skip = cb->args[0];
1895        int i = 0;
1896
1897        ovs_lock();
1898        list_for_each_entry(dp, &ovs_net->dps, list_node) {
1899                if (i >= skip &&
1900                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1901                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
1902                                         OVS_DP_CMD_GET) < 0)
1903                        break;
1904                i++;
1905        }
1906        ovs_unlock();
1907
1908        cb->args[0] = i;
1909
1910        return skb->len;
1911}
1912
1913static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1914        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1915        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1916        [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
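            /* The masks cache is a per-CPU allocation; cap its size at what
             * one per-CPU allocation unit can hold.
             */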
1917        [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
1918                PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
1919};
1920
1921static const struct genl_small_ops dp_datapath_genl_ops[] = {
1922        { .cmd = OVS_DP_CMD_NEW,
1923          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1924          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1925          .doit = ovs_dp_cmd_new
1926        },
1927        { .cmd = OVS_DP_CMD_DEL,
1928          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1929          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1930          .doit = ovs_dp_cmd_del
1931        },
1932        { .cmd = OVS_DP_CMD_GET,
1933          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1934          .flags = 0,               /* OK for unprivileged users. */
1935          .doit = ovs_dp_cmd_get,
1936          .dumpit = ovs_dp_cmd_dump
1937        },
1938        { .cmd = OVS_DP_CMD_SET,
1939          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1940          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1941          .doit = ovs_dp_cmd_set,
1942        },
1943};
1944
1945static struct genl_family dp_datapath_genl_family __ro_after_init = {
1946        .hdrsize = sizeof(struct ovs_header),
1947        .name = OVS_DATAPATH_FAMILY,
1948        .version = OVS_DATAPATH_VERSION,
1949        .maxattr = OVS_DP_ATTR_MAX,
1950        .policy = datapath_policy,
1951        .netnsok = true,
1952        .parallel_ops = true,
1953        .small_ops = dp_datapath_genl_ops,
1954        .n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1955        .mcgrps = &ovs_dp_datapath_multicast_group,
1956        .n_mcgrps = 1,
1957        .module = THIS_MODULE,
1958};
1959
1960/* Called with ovs_mutex or RCU read lock. */
1961static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1962                                   struct net *net, u32 portid, u32 seq,
1963                                   u32 flags, u8 cmd, gfp_t gfp)
1964{
1965        struct ovs_header *ovs_header;
1966        struct ovs_vport_stats vport_stats;
1967        int err;
1968
1969        ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1970                                 flags, cmd);
1971        if (!ovs_header)
1972                return -EMSGSIZE;
1973
1974        ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1975
1976        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1977            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1978            nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1979                           ovs_vport_name(vport)) ||
1980            nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
1981                goto nla_put_failure;
1982
1983        if (!net_eq(net, dev_net(vport->dev))) {
1984                int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
1985
1986                if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
1987                        goto nla_put_failure;
1988        }
1989
1990        ovs_vport_get_stats(vport, &vport_stats);
1991        if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1992                          sizeof(struct ovs_vport_stats), &vport_stats,
1993                          OVS_VPORT_ATTR_PAD))
1994                goto nla_put_failure;
1995
1996        if (ovs_vport_get_upcall_portids(vport, skb))
1997                goto nla_put_failure;
1998
1999        err = ovs_vport_get_options(vport, skb);
2000        if (err == -EMSGSIZE)
2001                goto error;
2002
2003        genlmsg_end(skb, ovs_header);
2004        return 0;
2005
2006nla_put_failure:
2007        err = -EMSGSIZE;
2008error:
2009        genlmsg_cancel(skb, ovs_header);
2010        return err;
2011}
2012
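    /* Vport replies have no fixed upper bound (options are variable
     * length), so a default-sized skb is used and callers assert with
     * BUG_ON() that filling it succeeded.
     */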
2013static struct sk_buff *ovs_vport_cmd_alloc_info(void)
2014{
2015        return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2016}
2017
2018/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
2019struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
2020                                         u32 portid, u32 seq, u8 cmd)
2021{
2022        struct sk_buff *skb;
2023        int retval;
2024
2025        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2026        if (!skb)
2027                return ERR_PTR(-ENOMEM);
2028
2029        retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
2030                                         GFP_KERNEL);
2031        BUG_ON(retval < 0);
2032
2033        return skb;
2034}
2035
2036/* Called with ovs_mutex or RCU read lock. */
2037static struct vport *lookup_vport(struct net *net,
2038                                  const struct ovs_header *ovs_header,
2039                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
2040{
2041        struct datapath *dp;
2042        struct vport *vport;
2043
2044        if (a[OVS_VPORT_ATTR_IFINDEX])
2045                return ERR_PTR(-EOPNOTSUPP);
2046        if (a[OVS_VPORT_ATTR_NAME]) {
2047                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
2048                if (!vport)
2049                        return ERR_PTR(-ENODEV);
2050                if (ovs_header->dp_ifindex &&
2051                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
2052                        return ERR_PTR(-ENODEV);
2053                return vport;
2054        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
2055                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2056
2057                if (port_no >= DP_MAX_PORTS)
2058                        return ERR_PTR(-EFBIG);
2059
2060                dp = get_dp(net, ovs_header->dp_ifindex);
2061                if (!dp)
2062                        return ERR_PTR(-ENODEV);
2063
2064                vport = ovs_vport_ovsl_rcu(dp, port_no);
2065                if (!vport)
2066                        return ERR_PTR(-ENODEV);
2067                return vport;
2068        } else {
2069                return ERR_PTR(-EINVAL);
2070        }
2071}
2072
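    /* Return the largest forwarding headroom needed by any device
     * attached to this datapath. Called with ovs_mutex.
     */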
2073static unsigned int ovs_get_max_headroom(struct datapath *dp)
2074{
2075        unsigned int dev_headroom, max_headroom = 0;
2076        struct net_device *dev;
2077        struct vport *vport;
2078        int i;
2079
2080        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2081                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
2082                                         lockdep_ovsl_is_held()) {
2083                        dev = vport->dev;
2084                        dev_headroom = netdev_get_fwd_headroom(dev);
2085                        if (dev_headroom > max_headroom)
2086                                max_headroom = dev_headroom;
2087                }
2088        }
2089
2090        return max_headroom;
2091}
2092
2093/* Called with ovs_mutex */
2094static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
2095{
2096        struct vport *vport;
2097        int i;
2098
2099        dp->max_headroom = new_headroom;
2100        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2101                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
2102                                         lockdep_ovsl_is_held())
2103                        netdev_set_rx_headroom(vport->dev, new_headroom);
2104        }
2105}
2106
2107static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
2108{
2109        struct nlattr **a = info->attrs;
2110        struct ovs_header *ovs_header = info->userhdr;
2111        struct vport_parms parms;
2112        struct sk_buff *reply;
2113        struct vport *vport;
2114        struct datapath *dp;
2115        unsigned int new_headroom;
2116        u32 port_no;
2117        int err;
2118
2119        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2120            !a[OVS_VPORT_ATTR_UPCALL_PID])
2121                return -EINVAL;
2122        if (a[OVS_VPORT_ATTR_IFINDEX])
2123                return -EOPNOTSUPP;
2124
2125        port_no = a[OVS_VPORT_ATTR_PORT_NO]
2126                ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2127        if (port_no >= DP_MAX_PORTS)
2128                return -EFBIG;
2129
2130        reply = ovs_vport_cmd_alloc_info();
2131        if (!reply)
2132                return -ENOMEM;
2133
2134        ovs_lock();
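            /* new_vport() may drop ovs_lock to load a vport module and then
             * return -EAGAIN; the datapath must be looked up again, hence
             * the restart label.
             */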
2135restart:
2136        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2137        err = -ENODEV;
2138        if (!dp)
2139                goto exit_unlock_free;
2140
2141        if (port_no) {
2142                vport = ovs_vport_ovsl(dp, port_no);
2143                err = -EBUSY;
2144                if (vport)
2145                        goto exit_unlock_free;
2146        } else {
2147                for (port_no = 1; ; port_no++) {
2148                        if (port_no >= DP_MAX_PORTS) {
2149                                err = -EFBIG;
2150                                goto exit_unlock_free;
2151                        }
2152                        vport = ovs_vport_ovsl(dp, port_no);
2153                        if (!vport)
2154                                break;
2155                }
2156        }
2157
2158        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2159        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2160        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2161        parms.dp = dp;
2162        parms.port_no = port_no;
2163        parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2164
2165        vport = new_vport(&parms);
2166        err = PTR_ERR(vport);
2167        if (IS_ERR(vport)) {
2168                if (err == -EAGAIN)
2169                        goto restart;
2170                goto exit_unlock_free;
2171        }
2172
2173        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2174                                      info->snd_portid, info->snd_seq, 0,
2175                                      OVS_VPORT_CMD_NEW, GFP_KERNEL);
2176
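            /* A larger headroom must propagate to all ports, while a smaller
             * one only needs the new device brought up to the current max.
             */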
2177        new_headroom = netdev_get_fwd_headroom(vport->dev);
2178
2179        if (new_headroom > dp->max_headroom)
2180                ovs_update_headroom(dp, new_headroom);
2181        else
2182                netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2183
2184        BUG_ON(err < 0);
2185        ovs_unlock();
2186
2187        ovs_notify(&dp_vport_genl_family, reply, info);
2188        return 0;
2189
2190exit_unlock_free:
2191        ovs_unlock();
2192        kfree_skb(reply);
2193        return err;
2194}
2195
2196static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2197{
2198        struct nlattr **a = info->attrs;
2199        struct sk_buff *reply;
2200        struct vport *vport;
2201        int err;
2202
2203        reply = ovs_vport_cmd_alloc_info();
2204        if (!reply)
2205                return -ENOMEM;
2206
2207        ovs_lock();
2208        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2209        err = PTR_ERR(vport);
2210        if (IS_ERR(vport))
2211                goto exit_unlock_free;
2212
2213        if (a[OVS_VPORT_ATTR_TYPE] &&
2214            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2215                err = -EINVAL;
2216                goto exit_unlock_free;
2217        }
2218
2219        if (a[OVS_VPORT_ATTR_OPTIONS]) {
2220                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2221                if (err)
2222                        goto exit_unlock_free;
2223        }
2224
2226        if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2227                struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2228
2229                err = ovs_vport_set_upcall_portids(vport, ids);
2230                if (err)
2231                        goto exit_unlock_free;
2232        }
2233
2234        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2235                                      info->snd_portid, info->snd_seq, 0,
2236                                      OVS_VPORT_CMD_SET, GFP_KERNEL);
2237        BUG_ON(err < 0);
2238
2239        ovs_unlock();
2240        ovs_notify(&dp_vport_genl_family, reply, info);
2241        return 0;
2242
2243exit_unlock_free:
2244        ovs_unlock();
2245        kfree_skb(reply);
2246        return err;
2247}
2248
2249static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2250{
2251        bool update_headroom = false;
2252        struct nlattr **a = info->attrs;
2253        struct sk_buff *reply;
2254        struct datapath *dp;
2255        struct vport *vport;
2256        unsigned int new_headroom;
2257        int err;
2258
2259        reply = ovs_vport_cmd_alloc_info();
2260        if (!reply)
2261                return -ENOMEM;
2262
2263        ovs_lock();
2264        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2265        err = PTR_ERR(vport);
2266        if (IS_ERR(vport))
2267                goto exit_unlock_free;
2268
2269        if (vport->port_no == OVSP_LOCAL) {
2270                err = -EINVAL;
2271                goto exit_unlock_free;
2272        }
2273
2274        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2275                                      info->snd_portid, info->snd_seq, 0,
2276                                      OVS_VPORT_CMD_DEL, GFP_KERNEL);
2277        BUG_ON(err < 0);
2278
2279        /* Deleting the vport may require the dp max_headroom to shrink. */
2280        dp = vport->dp;
2281        if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2282                update_headroom = true;
2283
2284        netdev_reset_rx_headroom(vport->dev);
2285        ovs_dp_detach_port(vport);
2286
2287        if (update_headroom) {
2288                new_headroom = ovs_get_max_headroom(dp);
2289
2290                if (new_headroom < dp->max_headroom)
2291                        ovs_update_headroom(dp, new_headroom);
2292        }
2293        ovs_unlock();
2294
2295        ovs_notify(&dp_vport_genl_family, reply, info);
2296        return 0;
2297
2298exit_unlock_free:
2299        ovs_unlock();
2300        kfree_skb(reply);
2301        return err;
2302}
2303
2304static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2305{
2306        struct nlattr **a = info->attrs;
2307        struct ovs_header *ovs_header = info->userhdr;
2308        struct sk_buff *reply;
2309        struct vport *vport;
2310        int err;
2311
2312        reply = ovs_vport_cmd_alloc_info();
2313        if (!reply)
2314                return -ENOMEM;
2315
2316        rcu_read_lock();
2317        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2318        err = PTR_ERR(vport);
2319        if (IS_ERR(vport))
2320                goto exit_unlock_free;
2321        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2322                                      info->snd_portid, info->snd_seq, 0,
2323                                      OVS_VPORT_CMD_GET, GFP_ATOMIC);
2324        BUG_ON(err < 0);
2325        rcu_read_unlock();
2326
2327        return genlmsg_reply(reply, info);
2328
2329exit_unlock_free:
2330        rcu_read_unlock();
2331        kfree_skb(reply);
2332        return err;
2333}
2334
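    /* Dump all vports of a datapath: cb->args[0] is the current hash
     * bucket and cb->args[1] the number of entries already dumped from
     * it, so an interrupted dump resumes where it stopped.
     */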
2335static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2336{
2337        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2338        struct datapath *dp;
2339        int bucket = cb->args[0], skip = cb->args[1];
2340        int i, j = 0;
2341
2342        rcu_read_lock();
2343        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2344        if (!dp) {
2345                rcu_read_unlock();
2346                return -ENODEV;
2347        }
2348        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2349                struct vport *vport;
2350
2351                j = 0;
2352                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2353                        if (j >= skip &&
2354                            ovs_vport_cmd_fill_info(vport, skb,
2355                                                    sock_net(skb->sk),
2356                                                    NETLINK_CB(cb->skb).portid,
2357                                                    cb->nlh->nlmsg_seq,
2358                                                    NLM_F_MULTI,
2359                                                    OVS_VPORT_CMD_GET,
2360                                                    GFP_ATOMIC) < 0)
2361                                goto out;
2362
2363                        j++;
2364                }
2365                skip = 0;
2366        }
2367out:
2368        rcu_read_unlock();
2369
2370        cb->args[0] = i;
2371        cb->args[1] = j;
2372
2373        return skb->len;
2374}
2375
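    /* Periodically rebalance the flow-mask usage counters of every
     * datapath in this net namespace, then re-arm the delayed work.
     */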
2376static void ovs_dp_masks_rebalance(struct work_struct *work)
2377{
2378        struct ovs_net *ovs_net = container_of(work, struct ovs_net,
2379                                               masks_rebalance.work);
2380        struct datapath *dp;
2381
2382        ovs_lock();
2383
2384        list_for_each_entry(dp, &ovs_net->dps, list_node)
2385                ovs_flow_masks_rebalance(&dp->table);
2386
2387        ovs_unlock();
2388
2389        schedule_delayed_work(&ovs_net->masks_rebalance,
2390                              msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
2391}
2392
2393static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2394        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2395        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2396        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2397        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2398        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
2399        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2400        [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2401        [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
2402};
2403
2404static const struct genl_small_ops dp_vport_genl_ops[] = {
2405        { .cmd = OVS_VPORT_CMD_NEW,
2406          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2407          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2408          .doit = ovs_vport_cmd_new
2409        },
2410        { .cmd = OVS_VPORT_CMD_DEL,
2411          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2412          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2413          .doit = ovs_vport_cmd_del
2414        },
2415        { .cmd = OVS_VPORT_CMD_GET,
2416          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2417          .flags = 0,               /* OK for unprivileged users. */
2418          .doit = ovs_vport_cmd_get,
2419          .dumpit = ovs_vport_cmd_dump
2420        },
2421        { .cmd = OVS_VPORT_CMD_SET,
2422          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2423          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2424          .doit = ovs_vport_cmd_set,
2425        },
2426};
2427
2428struct genl_family dp_vport_genl_family __ro_after_init = {
2429        .hdrsize = sizeof(struct ovs_header),
2430        .name = OVS_VPORT_FAMILY,
2431        .version = OVS_VPORT_VERSION,
2432        .maxattr = OVS_VPORT_ATTR_MAX,
2433        .policy = vport_policy,
2434        .netnsok = true,
2435        .parallel_ops = true,
2436        .small_ops = dp_vport_genl_ops,
2437        .n_small_ops = ARRAY_SIZE(dp_vport_genl_ops),
2438        .mcgrps = &ovs_dp_vport_multicast_group,
2439        .n_mcgrps = 1,
2440        .module = THIS_MODULE,
2441};
2442
2443static struct genl_family * const dp_genl_families[] = {
2444        &dp_datapath_genl_family,
2445        &dp_vport_genl_family,
2446        &dp_flow_genl_family,
2447        &dp_packet_genl_family,
2448        &dp_meter_genl_family,
2449#if     IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2450        &dp_ct_limit_genl_family,
2451#endif
2452};
2453
2454static void dp_unregister_genl(int n_families)
2455{
2456        int i;
2457
2458        for (i = 0; i < n_families; i++)
2459                genl_unregister_family(dp_genl_families[i]);
2460}
2461
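    /* Register every generic netlink family; on failure, unregister the
     * families that were registered before the error and bail out.
     */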
2462static int __init dp_register_genl(void)
2463{
2464        int err;
2465        int i;
2466
2467        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2468
2469                err = genl_register_family(dp_genl_families[i]);
2470                if (err)
2471                        goto error;
2472        }
2473
2474        return 0;
2475
2476error:
2477        dp_unregister_genl(i);
2478        return err;
2479}
2480
2481static int __net_init ovs_init_net(struct net *net)
2482{
2483        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2484        int err;
2485
2486        INIT_LIST_HEAD(&ovs_net->dps);
2487        INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2488        INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);
2489
2490        err = ovs_ct_init(net);
2491        if (err)
2492                return err;
2493
2494        schedule_delayed_work(&ovs_net->masks_rebalance,
2495                              msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
2496        return 0;
2497}
2498
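    /* Collect from @net the internal-device vports whose backing netdev
     * lives in the exiting namespace @dnet, so ovs_exit_net() can
     * detach them.
     */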
2499static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2500                                            struct list_head *head)
2501{
2502        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2503        struct datapath *dp;
2504
2505        list_for_each_entry(dp, &ovs_net->dps, list_node) {
2506                int i;
2507
2508                for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2509                        struct vport *vport;
2510
2511                        hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2512                                if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2513                                        continue;
2514
2515                                if (dev_net(vport->dev) == dnet)
2516                                        list_add(&vport->detach_list, head);
2517                        }
2518                }
2519        }
2520}
2521
2522static void __net_exit ovs_exit_net(struct net *dnet)
2523{
2524        struct datapath *dp, *dp_next;
2525        struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2526        struct vport *vport, *vport_next;
2527        struct net *net;
2528        LIST_HEAD(head);
2529
2530        ovs_lock();
2531
2532        ovs_ct_exit(dnet);
2533
2534        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2535                __dp_destroy(dp);
2536
2537        down_read(&net_rwsem);
2538        for_each_net(net)
2539                list_vports_from_net(net, dnet, &head);
2540        up_read(&net_rwsem);
2541
2542        /* Detach all vports whose backing device lives in the dying netns. */
2543        list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2544                list_del(&vport->detach_list);
2545                ovs_dp_detach_port(vport);
2546        }
2547
2548        ovs_unlock();
2549
2550        cancel_delayed_work_sync(&ovs_net->masks_rebalance);
2551        cancel_work_sync(&ovs_net->dp_notify_work);
2552}
2553
2554static struct pernet_operations ovs_net_ops = {
2555        .init = ovs_init_net,
2556        .exit = ovs_exit_net,
2557        .id   = &ovs_net_id,
2558        .size = sizeof(struct ovs_net),
2559};
2560
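    /* Module init/cleanup. dp_cleanup() unwinds dp_init() in reverse;
     * the rcu_barrier() lets pending RCU callbacks (e.g. datapath
     * destruction) finish before ovs_vport_exit() and ovs_flow_exit()
     * free their resources.
     */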
2561static int __init dp_init(void)
2562{
2563        int err;
2564
2565        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
2566                     sizeof_field(struct sk_buff, cb));
2567
2568        pr_info("Open vSwitch switching datapath\n");
2569
2570        err = action_fifos_init();
2571        if (err)
2572                goto error;
2573
2574        err = ovs_internal_dev_rtnl_link_register();
2575        if (err)
2576                goto error_action_fifos_exit;
2577
2578        err = ovs_flow_init();
2579        if (err)
2580                goto error_unreg_rtnl_link;
2581
2582        err = ovs_vport_init();
2583        if (err)
2584                goto error_flow_exit;
2585
2586        err = register_pernet_device(&ovs_net_ops);
2587        if (err)
2588                goto error_vport_exit;
2589
2590        err = register_netdevice_notifier(&ovs_dp_device_notifier);
2591        if (err)
2592                goto error_netns_exit;
2593
2594        err = ovs_netdev_init();
2595        if (err)
2596                goto error_unreg_notifier;
2597
2598        err = dp_register_genl();
2599        if (err < 0)
2600                goto error_unreg_netdev;
2601
2602        return 0;
2603
2604error_unreg_netdev:
2605        ovs_netdev_exit();
2606error_unreg_notifier:
2607        unregister_netdevice_notifier(&ovs_dp_device_notifier);
2608error_netns_exit:
2609        unregister_pernet_device(&ovs_net_ops);
2610error_vport_exit:
2611        ovs_vport_exit();
2612error_flow_exit:
2613        ovs_flow_exit();
2614error_unreg_rtnl_link:
2615        ovs_internal_dev_rtnl_link_unregister();
2616error_action_fifos_exit:
2617        action_fifos_exit();
2618error:
2619        return err;
2620}
2621
2622static void dp_cleanup(void)
2623{
2624        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2625        ovs_netdev_exit();
2626        unregister_netdevice_notifier(&ovs_dp_device_notifier);
2627        unregister_pernet_device(&ovs_net_ops);
2628        rcu_barrier();
2629        ovs_vport_exit();
2630        ovs_flow_exit();
2631        ovs_internal_dev_rtnl_link_unregister();
2632        action_fifos_exit();
2633}
2634
2635module_init(dp_init);
2636module_exit(dp_cleanup);
2637
2638MODULE_DESCRIPTION("Open vSwitch switching datapath");
2639MODULE_LICENSE("GPL");
2640MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2641MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2642MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2643MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
2644MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
2645MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);
2646