linux/net/openvswitch/actions.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;
        int actions_len;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN      (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
        unsigned long dst;
        struct vport *vport;
        struct ovs_skb_cb cb;
        __be16 inner_protocol;
        u16 network_offset;     /* valid only for MPLS */
        u16 vlan_tci;
        __be16 vlan_proto;
        unsigned int l2_len;
        u8 mac_proto;
        u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
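/* Action execution may recurse (e.g. via sample(), clone() or recirc).
 * The first OVS_DEFERRED_ACTION_THRESHOLD levels run inline, using a
 * pre-allocated per-CPU flow key from 'flow_keys'; anything deeper is
 * queued on the per-CPU 'action_fifos' and drained once execution has
 * unwound back to the outermost level.
 */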
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
        struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
        struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
        int level = this_cpu_read(exec_actions_level);
        struct sw_flow_key *key = NULL;

        if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
                key = &keys->key[level - 1];
                *key = *key_;
        }

        return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

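/* Reserve the next free slot in the fifo, or return NULL if it is
 * full. The fifo never wraps: it is drained and reset after each
 * packet (see process_deferred_actions()).
 */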
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}

/* Add an entry to the deferred-action fifo. Return the new entry, or
 * NULL if the fifo is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                    const struct sw_flow_key *key,
                                    const struct nlattr *actions,
                                    const int actions_len)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = actions;
                da->actions_len = actions_len;
                da->pkt_key = *key;
        }

        return da;
}

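/* Flow-key validity is tracked in a spare bit of 'mac_proto', so it
 * travels with the key: any action that rewrites the headers the key
 * was extracted from marks the key invalid, forcing an
 * ovs_flow_key_update() before the key is used for another lookup
 * (e.g. recirculation or conntrack).
 */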
static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_key *key,
                         u32 recirc_id,
                         const struct nlattr *actions, int len,
                         bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
{
        int err;

        err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
                            skb->mac_len);
        if (err)
                return err;

        invalidate_flow_key(key);
        return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        int err;

        err = skb_mpls_pop(skb, ethertype, skb->mac_len);
        if (err)
                return err;

        invalidate_flow_key(key);
        return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const __be32 *mpls_lse, const __be32 *mask)
{
        struct mpls_shim_hdr *stack;
        __be32 lse;
        int err;

        stack = mpls_hdr(skb);
        lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
        err = skb_mpls_update_lse(skb, lse);
        if (err)
                return err;

        flow_key->mpls.top_lse = lse;
        return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
        } else {
                key->eth.vlan.tci = 0;
                key->eth.vlan.tpid = 0;
        }
        return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
        } else {
                key->eth.vlan.tci = vlan->vlan_tci;
                key->eth.vlan.tpid = vlan->vlan_tpid;
        }
        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
        u16 *dst = (u16 *)dst_;
        const u16 *src = (const u16 *)src_;
        const u16 *mask = (const u16 *)mask_;

        OVS_SET_MASKED(dst[0], src[0], mask[0]);
        OVS_SET_MASKED(dst[1], src[1], mask[1]);
        OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
                        const struct ovs_key_ethernet *key,
                        const struct ovs_key_ethernet *mask)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
                               mask->eth_src);
        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
                               mask->eth_dst);

        skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
        return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
        skb_pull_rcsum(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_action_push_eth *ethh)
{
        struct ethhdr *hdr;

        /* Add the new Ethernet header */
        if (skb_cow_head(skb, ETH_HLEN) < 0)
                return -ENOMEM;

        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);

        hdr = eth_hdr(skb);
        ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
        ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
        hdr->h_proto = skb->protocol;

        skb_postpush_rcsum(skb, hdr, ETH_HLEN);

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_ETHERNET;
        invalidate_flow_key(key);
        return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct nshhdr *nh)
{
        int err;

        err = nsh_push(skb, nh);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = nsh_pop(skb);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        if (skb->protocol == htons(ETH_P_TEB))
                key->mac_proto = MAC_PROTO_ETHERNET;
        else
                key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

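/* TCP and UDP checksums cover an IP pseudo-header that includes the
 * addresses, so rewriting an address must also patch the L4 checksum.
 * A UDP checksum of zero means "no checksum" and is left alone; if
 * patching yields zero, it is folded to CSUM_MANGLED_0.
 */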
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
                                  __be32 addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->frag_off & htons(IP_OFFSET))
                return;

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, true);
        }
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
                           const __be32 mask[4], __be32 masked[4])
{
        masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
        masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
        masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
        masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
        /* Bits 21-24 are always unmasked, so this retains their values. */
        OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
        OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
        OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
                       u8 mask)
{
        new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv4 *key,
                    const struct ovs_key_ipv4 *mask)
{
        struct iphdr *nh;
        __be32 new_addr;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (mask->ipv4_src) {
                new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

                if (unlikely(new_addr != nh->saddr)) {
                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
                        flow_key->ipv4.addr.src = new_addr;
                }
        }
        if (mask->ipv4_dst) {
                new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

                if (unlikely(new_addr != nh->daddr)) {
                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
                        flow_key->ipv4.addr.dst = new_addr;
                }
        }
        if (mask->ipv4_tos) {
                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
                flow_key->ip.tos = nh->tos;
        }
        if (mask->ipv4_ttl) {
                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
                flow_key->ip.ttl = nh->ttl;
        }

        return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv6 *key,
                    const struct ovs_key_ipv6 *mask)
{
        struct ipv6hdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
                __be32 *saddr = (__be32 *)&nh->saddr;
                __be32 masked[4];

                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
                        set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
                }
        }
        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
                __be32 *daddr = (__be32 *)&nh->daddr;
                __be32 masked[4];

                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
                        if (ipv6_ext_hdr(nh->nexthdr))
                                recalc_csum = (ipv6_find_hdr(skb, &offset,
                                                             NEXTHDR_ROUTING,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);

                        set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
                }
        }
        if (mask->ipv6_tclass) {
                ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);
        }
        if (mask->ipv6_label) {
                set_ipv6_fl(nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        }
        if (mask->ipv6_hlimit) {
                OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
                               mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
        }
        return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct nlattr *a)
{
        struct nshhdr *nh;
        size_t length;
        int err;
        u8 flags;
        u8 ttl;
        int i;

        struct ovs_key_nsh key;
        struct ovs_key_nsh mask;

        err = nsh_key_from_nlattr(a, &key, &mask);
        if (err)
                return err;

        /* Make sure the NSH base header is there */
        if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
                return -ENOMEM;

        nh = nsh_hdr(skb);
        length = nsh_hdr_len(nh);

        /* Make sure the whole NSH header is there */
        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                       length);
        if (unlikely(err))
                return err;

        nh = nsh_hdr(skb);
        skb_postpull_rcsum(skb, nh, length);
        flags = nsh_get_flags(nh);
        flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
        flow_key->nsh.base.flags = flags;
        ttl = nsh_get_ttl(nh);
        ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
        flow_key->nsh.base.ttl = ttl;
        nsh_set_flags_and_ttl(nh, flags, ttl);
        nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
                                  mask.base.path_hdr);
        flow_key->nsh.base.path_hdr = nh->path_hdr;
        switch (nh->mdtype) {
        case NSH_M_TYPE1:
                for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
                        nh->md1.context[i] =
                            OVS_MASKED(nh->md1.context[i], key.context[i],
                                       mask.context[i]);
                }
                memcpy(flow_key->nsh.context, nh->md1.context,
                       sizeof(nh->md1.context));
                break;
        case NSH_M_TYPE2:
                memset(flow_key->nsh.context, 0,
                       sizeof(flow_key->nsh.context));
                break;
        default:
                return -EINVAL;
        }
        skb_postpush_rcsum(skb, nh, length);
        return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_udp *key,
                   const struct ovs_key_udp *mask)
{
        struct udphdr *uh;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        /* Either of the masks is non-zero, so do not bother checking them. */
        src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
        dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                if (likely(src != uh->source)) {
                        set_tp_port(skb, &uh->source, src, &uh->check);
                        flow_key->tp.src = src;
                }
                if (likely(dst != uh->dest)) {
                        set_tp_port(skb, &uh->dest, dst, &uh->check);
                        flow_key->tp.dst = dst;
                }

                if (unlikely(!uh->check))
                        uh->check = CSUM_MANGLED_0;
        } else {
                uh->source = src;
                uh->dest = dst;
                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
        }

        skb_clear_hash(skb);

        return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_tcp *key,
                   const struct ovs_key_tcp *mask)
{
        struct tcphdr *th;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
        if (likely(src != th->source)) {
                set_tp_port(skb, &th->source, src, &th->check);
                flow_key->tp.src = src;
        }
        dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
        if (likely(dst != th->dest)) {
                set_tp_port(skb, &th->dest, dst, &th->check);
                flow_key->tp.dst = dst;
        }
        skb_clear_hash(skb);

        return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_sctp *key,
                    const struct ovs_key_sctp *mask)
{
        unsigned int sctphoff = skb_transport_offset(skb);
        struct sctphdr *sh;
        __le32 old_correct_csum, new_csum, old_csum;
        int err;

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        old_csum = sh->checksum;
        old_correct_csum = sctp_compute_cksum(skb, sctphoff);

        sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
        sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

        new_csum = sctp_compute_cksum(skb, sctphoff);

        /* Carry any checksum errors through. */
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

        skb_clear_hash(skb);
        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;

        return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
        struct vport *vport = data->vport;

        if (skb_cow_head(skb, data->l2_len) < 0) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        __skb_dst_copy(skb, data->dst);
        *OVS_CB(skb) = data->cb;
        skb->inner_protocol = data->inner_protocol;
        if (data->vlan_tci & VLAN_CFI_MASK)
                __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
        else
                __vlan_hwaccel_clear_tag(skb);

        /* Reconstruct the MAC header.  */
        skb_push(skb, data->l2_len);
        memcpy(skb->data, &data->l2_data, data->l2_len);
        skb_postpush_rcsum(skb, skb->data, data->l2_len);
        skb_reset_mac_header(skb);

        if (eth_p_mpls(skb->protocol)) {
                skb->inner_network_header = skb->network_header;
                skb_set_network_header(skb, data->network_offset);
                skb_reset_mac_len(skb);
        }

        ovs_vport_send(vport, skb, data->mac_proto);
        return 0;
}

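/* The IPv4/IPv6 fragmentation paths consult the dst attached to the
 * skb for the path MTU. A minimal on-stack dst (see ovs_fragment())
 * backed by these ops is enough to report the egress vport's MTU.
 */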
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
        return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
        .family = AF_UNSPEC,
        .mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
                         u16 orig_network_offset, u8 mac_proto)
{
        unsigned int hlen = skb_network_offset(skb);
        struct ovs_frag_data *data;

        data = this_cpu_ptr(&ovs_frag_data_storage);
        data->dst = skb->_skb_refdst;
        data->vport = vport;
        data->cb = *OVS_CB(skb);
        data->inner_protocol = skb->inner_protocol;
        data->network_offset = orig_network_offset;
        if (skb_vlan_tag_present(skb))
                data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
        else
                data->vlan_tci = 0;
        data->vlan_proto = skb->vlan_proto;
        data->mac_proto = mac_proto;
        data->l2_len = hlen;
        memcpy(&data->l2_data, skb->data, hlen);

        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
                         struct sk_buff *skb, u16 mru,
                         struct sw_flow_key *key)
{
        u16 orig_network_offset = 0;

        if (eth_p_mpls(skb->protocol)) {
                orig_network_offset = skb_network_offset(skb);
                skb->network_header = skb->inner_network_header;
        }

        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
                goto err;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                struct dst_entry ovs_dst;
                unsigned long orig_dst;

                prepare_frag(vport, skb, orig_network_offset,
                             ovs_key_mac_proto(key));
                dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_dst);
                IPCB(skb)->frag_max_size = mru;

                ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
                unsigned long orig_dst;
                struct rt6_info ovs_rt;

                if (!v6ops)
                        goto err;

                prepare_frag(vport, skb, orig_network_offset,
                             ovs_key_mac_proto(key));
                memset(&ovs_rt, 0, sizeof(ovs_rt));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IP6CB(skb)->frag_max_size = mru;

                v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else {
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(key->eth.type), mru,
                          vport->dev->mtu);
                goto err;
        }

        return;
err:
        kfree_skb(skb);
}

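/* Send 'skb' out of 'out_port', trimming any truncated bytes first.
 * Packets larger than the MRU are fragmented when the MRU fits the
 * device MTU, and dropped otherwise.
 */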
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
                      struct sw_flow_key *key)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport)) {
                u16 mru = OVS_CB(skb)->mru;
                u32 cutlen = OVS_CB(skb)->cutlen;

                if (unlikely(cutlen > 0)) {
                        if (skb->len - cutlen > ovs_mac_header_len(key))
                                pskb_trim(skb, skb->len - cutlen);
                        else
                                pskb_trim(skb, ovs_mac_header_len(key));
                }

                if (likely(!mru ||
                           (skb->len <= mru + vport->dev->hard_header_len))) {
                        ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
                } else if (mru <= vport->dev->mtu) {
                        struct net *net = read_pnet(&dp->net);

                        ovs_fragment(net, vport, skb, mru, key);
                } else {
                        kfree_skb(skb);
                }
        } else {
                kfree_skb(skb);
        }
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len,
                            uint32_t cutlen)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.mru = OVS_CB(skb)->mru;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get egress tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                err = dev_fill_metadata_dst(vport->dev, skb);
                                if (!err)
                                        upcall.egress_tun_info = skb_tunnel_info(skb);
                        }

                        break;
                }

                case OVS_USERSPACE_ATTR_ACTIONS: {
                        /* Include actions. */
                        upcall.actions = actions;
                        upcall.actions_len = actions_len;
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
                  bool last)
{
        struct nlattr *actions;
        struct nlattr *sample_arg;
        int rem = nla_len(attr);
        const struct sample_arg *arg;
        bool clone_flow_key;

        /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
        sample_arg = nla_data(attr);
        arg = nla_data(sample_arg);
        actions = nla_next(sample_arg, &rem);

        if ((arg->probability != U32_MAX) &&
            (!arg->probability || prandom_u32() > arg->probability)) {
                if (last)
                        consume_skb(skb);
                return 0;
        }

        clone_flow_key = !arg->exec;
        return clone_execute(dp, skb, key, 0, actions, rem, last,
                             clone_flow_key);
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
                 struct sw_flow_key *key, const struct nlattr *attr,
                 bool last)
{
        struct nlattr *actions;
        struct nlattr *clone_arg;
        int rem = nla_len(attr);
        bool dont_clone_flow_key;

        /* The first action is always 'OVS_CLONE_ATTR_ARG'. */
        clone_arg = nla_data(attr);
        dont_clone_flow_key = nla_get_u32(clone_arg);
        actions = nla_next(clone_arg, &rem);

        return clone_execute(dp, skb, key, 0, actions, rem, last,
                             !dont_clone_flow_key);
}

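/* Compute an L4 flow hash for 'skb' and store it in the flow key so a
 * subsequent recirculation can match on it. A result of zero is
 * replaced with 0x1 so that "hash set" is distinguishable from
 * "no hash".
 */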
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1;

        key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
                              struct sw_flow_key *flow_key,
                              const struct nlattr *a)
{
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
                struct ovs_tunnel_info *tun = nla_data(a);

                skb_dst_drop(skb);
                dst_hold((struct dst_entry *)tun->tun_dst);
                skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
                return 0;
        }

        return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
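/* A masked-set attribute carries the key immediately followed by an
 * equally sized mask, so advancing a typed pointer by one element
 * yields the mask. For example, for OVS_KEY_ATTR_IPV4 the payload is
 * a struct ovs_key_ipv4 (key) followed by a struct ovs_key_ipv4 (mask).
 */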

static int execute_masked_set_action(struct sk_buff *skb,
                                     struct sw_flow_key *flow_key,
                                     const struct nlattr *a)
{
        int err = 0;

        switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
                OVS_SET_MASKED(skb->priority, nla_get_u32(a),
                               *get_mask(a, u32 *));
                flow_key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                /* Masked data not supported for tunnel. */
                err = -EINVAL;
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, flow_key, nla_data(a),
                                   get_mask(a, struct ovs_key_ethernet *));
                break;

        case OVS_KEY_ATTR_NSH:
                err = set_nsh(skb, flow_key, a);
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv4 *));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv6 *));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_tcp *));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_udp *));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_sctp *));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
                                                                    __be32 *));
                break;

        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
        case OVS_KEY_ATTR_CT_LABELS:
        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
                err = -EINVAL;
                break;
        }

        return err;
}

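/* Re-inject 'skb' into the datapath for another flow lookup, tagged
 * with the given recirculation id. The flow key must be up to date
 * before the lookup, so it is refreshed here if an earlier action
 * invalidated it.
 */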
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, bool last)
{
        u32 recirc_id;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        recirc_id = nla_get_u32(a);
        return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
                                 struct sw_flow_key *key,
                                 const struct nlattr *attr, bool last)
{
        const struct nlattr *actions, *cpl_arg;
        const struct check_pkt_len_arg *arg;
        int rem = nla_len(attr);
        bool clone_flow_key;

        /* The first netlink attribute in 'attr' is always
         * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
         */
        cpl_arg = nla_data(attr);
        arg = nla_data(cpl_arg);

        if (skb->len <= arg->pkt_len) {
                /* Second netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
                 */
                actions = nla_next(cpl_arg, &rem);
                clone_flow_key = !arg->exec_for_lesser_equal;
        } else {
                /* Third netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
                 */
                actions = nla_next(cpl_arg, &rem);
                actions = nla_next(actions, &rem);
                clone_flow_key = !arg->exec_for_greater;
        }

        return clone_execute(dp, skb, key, 0, nla_data(actions),
                             nla_len(actions), last, clone_flow_key);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT: {
                        int port = nla_get_u32(a);
                        struct sk_buff *clone;

                        /* Every output action needs a separate clone
                         * of 'skb'. If the output action is the last
                         * action, cloning can be avoided.
                         */
                        if (nla_is_last(a, rem)) {
                                do_output(dp, skb, port, key);
                                /* 'skb' has been used for output.
                                 */
                                return 0;
                        }

                        clone = skb_clone(skb, GFP_ATOMIC);
                        if (clone)
                                do_output(dp, clone, port, key);
                        OVS_CB(skb)->cutlen = 0;
                        break;
                }

                case OVS_ACTION_ATTR_TRUNC: {
                        struct ovs_action_trunc *trunc = nla_data(a);

                        if (skb->len > trunc->max_len)
                                OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
                        break;
                }

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a, attr,
                                                     len, OVS_CB(skb)->cutlen);
                        OVS_CB(skb)->cutlen = 0;
                        break;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;

                case OVS_ACTION_ATTR_RECIRC: {
                        bool last = nla_is_last(a, rem);

                        err = execute_recirc(dp, skb, key, a, last);
                        if (last) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */
                                return err;
                        }
                        break;
                }

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SET_MASKED:
                case OVS_ACTION_ATTR_SET_TO_MASKED:
                        err = execute_masked_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE: {
                        bool last = nla_is_last(a, rem);

                        err = sample(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }

                case OVS_ACTION_ATTR_CT:
                        if (!is_flow_key_valid(key)) {
                                err = ovs_flow_key_update(skb, key);
                                if (err)
                                        return err;
                        }

                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));

                        /* Hide stolen IP fragments from user space. */
                        if (err)
                                return err == -EINPROGRESS ? 0 : err;
                        break;

                case OVS_ACTION_ATTR_CT_CLEAR:
                        err = ovs_ct_clear(skb, key);
                        break;

                case OVS_ACTION_ATTR_PUSH_ETH:
                        err = push_eth(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_ETH:
                        err = pop_eth(skb, key);
                        break;

                case OVS_ACTION_ATTR_PUSH_NSH: {
                        u8 buffer[NSH_HDR_MAX_LEN];
                        struct nshhdr *nh = (struct nshhdr *)buffer;

                        err = nsh_hdr_from_nlattr(nla_data(a), nh,
                                                  NSH_HDR_MAX_LEN);
                        if (unlikely(err))
                                break;
                        err = push_nsh(skb, key, nh);
                        break;
                }

                case OVS_ACTION_ATTR_POP_NSH:
                        err = pop_nsh(skb, key);
                        break;

                case OVS_ACTION_ATTR_METER:
                        if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
                                consume_skb(skb);
                                return 0;
                        }
                        break;

                case OVS_ACTION_ATTR_CLONE: {
                        bool last = nla_is_last(a, rem);

                        err = clone(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }

                case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
                        bool last = nla_is_last(a, rem);

                        err = execute_check_pkt_len(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        consume_skb(skb);
        return 0;
}

/* Execute the actions on a clone of the packet. The execution affects
 * neither the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_key *key, u32 recirc_id,
                         const struct nlattr *actions, int len,
                         bool last, bool clone_flow_key)
{
        struct deferred_action *da;
        struct sw_flow_key *clone;

        skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                /* Out of memory, skip this action.
                 */
                return 0;
        }

        /* When clone_flow_key is false, the 'key' will not be changed
         * by the actions, so the 'key' can be used directly.
         * Otherwise, try to clone the key from the next recursion level
         * of 'flow_keys'. If the clone is successful, execute the
         * actions without deferring.
         */
        clone = clone_flow_key ? clone_key(key) : key;
        if (clone) {
                int err = 0;

                if (actions) { /* Sample action */
                        if (clone_flow_key)
                                __this_cpu_inc(exec_actions_level);

                        err = do_execute_actions(dp, skb, clone,
                                                 actions, len);

                        if (clone_flow_key)
                                __this_cpu_dec(exec_actions_level);
                } else { /* Recirc action */
                        clone->recirc_id = recirc_id;
                        ovs_dp_process_packet(skb, clone);
                }
                return err;
        }

        /* Out of 'flow_keys' space. Defer actions. */
        da = add_deferred_actions(skb, key, actions, len);
        if (da) {
                if (!actions) { /* Recirc action */
                        key = &da->pkt_key;
                        key->recirc_id = recirc_id;
                }
        } else {
                /* Out of per-CPU action FIFO space. Drop the 'skb' and
                 * log an error.
                 */
                kfree_skb(skb);

                if (net_ratelimit()) {
                        if (actions) { /* Sample action */
                                pr_warn("%s: deferred action limit reached, drop sample action\n",
                                        ovs_dp_name(dp));
                        } else {  /* Recirc action */
                                pr_warn("%s: deferred action limit reached, drop recirc action\n",
                                        ovs_dp_name(dp));
                        }
                }
        }
        return 0;
}

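/* Drain the per-CPU deferred action FIFO. Called only once execution
 * has unwound to the outermost recursion level, so the FIFO can be
 * reset for the next packet.
 */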
static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;
                int actions_len = da->actions_len;

                if (actions)
                        do_execute_actions(dp, skb, key, actions, actions_len);
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet.  */
        action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int err, level;

        level = __this_cpu_inc_return(exec_actions_level);
        if (unlikely(level > OVS_RECURSION_LIMIT)) {
                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
                                     ovs_dp_name(dp));
                kfree_skb(skb);
                err = -ENETDOWN;
                goto out;
        }

        OVS_CB(skb)->acts_origlen = acts->orig_len;
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (level == 1)
                process_deferred_actions(dp);

out:
        __this_cpu_dec(exec_actions_level);
        return err;
}

int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        flow_keys = alloc_percpu(struct action_flow_keys);
        if (!flow_keys) {
                free_percpu(action_fifos);
                return -ENOMEM;
        }

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
        free_percpu(flow_keys);
}