linux/net/openvswitch/actions.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2007-2017 Nicira, Inc.
   4 */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <linux/skbuff.h>
   9#include <linux/in.h>
  10#include <linux/ip.h>
  11#include <linux/openvswitch.h>
  12#include <linux/netfilter_ipv6.h>
  13#include <linux/sctp.h>
  14#include <linux/tcp.h>
  15#include <linux/udp.h>
  16#include <linux/in6.h>
  17#include <linux/if_arp.h>
  18#include <linux/if_vlan.h>
  19
  20#include <net/dst.h>
  21#include <net/ip.h>
  22#include <net/ipv6.h>
  23#include <net/ip6_fib.h>
  24#include <net/checksum.h>
  25#include <net/dsfield.h>
  26#include <net/mpls.h>
  27#include <net/sctp/checksum.h>
  28
  29#include "datapath.h"
  30#include "flow.h"
  31#include "conntrack.h"
  32#include "vport.h"
  33#include "flow_netlink.h"
  34
  35struct deferred_action {
  36        struct sk_buff *skb;
  37        const struct nlattr *actions;
  38        int actions_len;
  39
  40        /* Store pkt_key clone when creating deferred action. */
  41        struct sw_flow_key pkt_key;
  42};
  43
  44#define MAX_L2_LEN      (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
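/* Per-CPU scratch area used while fragmenting an oversized packet:
 * prepare_frag() saves the original L2 header and skb metadata here, and
 * ovs_vport_output() restores them onto every fragment before transmit.
 */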
  45struct ovs_frag_data {
  46        unsigned long dst;
  47        struct vport *vport;
  48        struct ovs_skb_cb cb;
  49        __be16 inner_protocol;
  50        u16 network_offset;     /* valid only for MPLS */
  51        u16 vlan_tci;
  52        __be16 vlan_proto;
  53        unsigned int l2_len;
  54        u8 mac_proto;
  55        u8 l2_data[MAX_L2_LEN];
  56};
  57
  58static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
  59
  60#define DEFERRED_ACTION_FIFO_SIZE 10
  61#define OVS_RECURSION_LIMIT 5
  62#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
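/* Per-CPU FIFO used to flatten recursion: when clone_execute() runs out of
 * per-CPU flow-key clone space, it queues the nested actions here and
 * process_deferred_actions() runs them once the outermost action list is done.
 */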
  63struct action_fifo {
  64        int head;
  65        int tail;
  66        /* Deferred action fifo queue storage. */
  67        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
  68};
  69
  70struct action_flow_keys {
  71        struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
  72};
  73
  74static struct action_fifo __percpu *action_fifos;
  75static struct action_flow_keys __percpu *flow_keys;
  76static DEFINE_PER_CPU(int, exec_actions_level);
  77
  78/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
   79 * space. Return NULL if out of key space.
  80 */
  81static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
  82{
  83        struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
  84        int level = this_cpu_read(exec_actions_level);
  85        struct sw_flow_key *key = NULL;
  86
  87        if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
  88                key = &keys->key[level - 1];
  89                *key = *key_;
  90        }
  91
  92        return key;
  93}
  94
  95static void action_fifo_init(struct action_fifo *fifo)
  96{
  97        fifo->head = 0;
  98        fifo->tail = 0;
  99}
 100
 101static bool action_fifo_is_empty(const struct action_fifo *fifo)
 102{
 103        return (fifo->head == fifo->tail);
 104}
 105
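/* Dequeue the oldest deferred action, or return NULL if the fifo is empty. */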
 106static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
 107{
 108        if (action_fifo_is_empty(fifo))
 109                return NULL;
 110
 111        return &fifo->fifo[fifo->tail++];
 112}
 113
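/* Reserve the next fifo entry, or return NULL if the fifo is full. */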
 114static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
 115{
 116        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
 117                return NULL;
 118
 119        return &fifo->fifo[fifo->head++];
 120}
 121
  122/* Queue a deferred action; returns the fifo entry, or NULL if the fifo is full. */
 123static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
 124                                    const struct sw_flow_key *key,
 125                                    const struct nlattr *actions,
 126                                    const int actions_len)
 127{
 128        struct action_fifo *fifo;
 129        struct deferred_action *da;
 130
 131        fifo = this_cpu_ptr(action_fifos);
 132        da = action_fifo_put(fifo);
 133        if (da) {
 134                da->skb = skb;
 135                da->actions = actions;
 136                da->actions_len = actions_len;
 137                da->pkt_key = *key;
 138        }
 139
 140        return da;
 141}
 142
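/* Actions that rewrite the packet beyond what the cached flow key describes
 * (MPLS/VLAN/Ethernet/NSH push and pop) mark the key invalid; recirculation
 * and conntrack re-extract it via ovs_flow_key_update() when needed.
 */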
 143static void invalidate_flow_key(struct sw_flow_key *key)
 144{
 145        key->mac_proto |= SW_FLOW_KEY_INVALID;
 146}
 147
 148static bool is_flow_key_valid(const struct sw_flow_key *key)
 149{
 150        return !(key->mac_proto & SW_FLOW_KEY_INVALID);
 151}
 152
 153static int clone_execute(struct datapath *dp, struct sk_buff *skb,
 154                         struct sw_flow_key *key,
 155                         u32 recirc_id,
 156                         const struct nlattr *actions, int len,
 157                         bool last, bool clone_flow_key);
 158
 159static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 160                              struct sw_flow_key *key,
 161                              const struct nlattr *attr, int len);
 162
 163static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 164                     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
 165{
 166        int err;
 167
 168        err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
 169        if (err)
 170                return err;
 171
 172        if (!mac_len)
 173                key->mac_proto = MAC_PROTO_NONE;
 174
 175        invalidate_flow_key(key);
 176        return 0;
 177}
 178
 179static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 180                    const __be16 ethertype)
 181{
 182        int err;
 183
 184        err = skb_mpls_pop(skb, ethertype, skb->mac_len,
 185                           ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
 186        if (err)
 187                return err;
 188
 189        if (ethertype == htons(ETH_P_TEB))
 190                key->mac_proto = MAC_PROTO_ETHERNET;
 191
 192        invalidate_flow_key(key);
 193        return 0;
 194}
 195
 196static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 197                    const __be32 *mpls_lse, const __be32 *mask)
 198{
 199        struct mpls_shim_hdr *stack;
 200        __be32 lse;
 201        int err;
 202
 203        stack = mpls_hdr(skb);
 204        lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
 205        err = skb_mpls_update_lse(skb, lse);
 206        if (err)
 207                return err;
 208
 209        flow_key->mpls.lse[0] = lse;
 210        return 0;
 211}
 212
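/* Strip the outermost VLAN header.  If another tag is then exposed, the
 * cached flow key must be re-extracted; otherwise its VLAN fields are cleared.
 */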
 213static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 214{
 215        int err;
 216
 217        err = skb_vlan_pop(skb);
 218        if (skb_vlan_tag_present(skb)) {
 219                invalidate_flow_key(key);
 220        } else {
 221                key->eth.vlan.tci = 0;
 222                key->eth.vlan.tpid = 0;
 223        }
 224        return err;
 225}
 226
 227static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
 228                     const struct ovs_action_push_vlan *vlan)
 229{
 230        if (skb_vlan_tag_present(skb)) {
 231                invalidate_flow_key(key);
 232        } else {
 233                key->eth.vlan.tci = vlan->vlan_tci;
 234                key->eth.vlan.tpid = vlan->vlan_tpid;
 235        }
 236        return skb_vlan_push(skb, vlan->vlan_tpid,
 237                             ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
 238}
 239
 240/* 'src' is already properly masked. */
 241static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
 242{
 243        u16 *dst = (u16 *)dst_;
 244        const u16 *src = (const u16 *)src_;
 245        const u16 *mask = (const u16 *)mask_;
 246
 247        OVS_SET_MASKED(dst[0], src[0], mask[0]);
 248        OVS_SET_MASKED(dst[1], src[1], mask[1]);
 249        OVS_SET_MASKED(dst[2], src[2], mask[2]);
 250}
 251
 252static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
 253                        const struct ovs_key_ethernet *key,
 254                        const struct ovs_key_ethernet *mask)
 255{
 256        int err;
 257
 258        err = skb_ensure_writable(skb, ETH_HLEN);
 259        if (unlikely(err))
 260                return err;
 261
 262        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 263
 264        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
 265                               mask->eth_src);
 266        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
 267                               mask->eth_dst);
 268
 269        skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 270
 271        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
 272        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
 273        return 0;
 274}
 275
 276/* pop_eth does not support VLAN packets as this action is never called
 277 * for them.
 278 */
 279static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
 280{
 281        skb_pull_rcsum(skb, ETH_HLEN);
 282        skb_reset_mac_header(skb);
 283        skb_reset_mac_len(skb);
 284
 285        /* safe right before invalidate_flow_key */
 286        key->mac_proto = MAC_PROTO_NONE;
 287        invalidate_flow_key(key);
 288        return 0;
 289}
 290
 291static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
 292                    const struct ovs_action_push_eth *ethh)
 293{
 294        struct ethhdr *hdr;
 295
 296        /* Add the new Ethernet header */
 297        if (skb_cow_head(skb, ETH_HLEN) < 0)
 298                return -ENOMEM;
 299
 300        skb_push(skb, ETH_HLEN);
 301        skb_reset_mac_header(skb);
 302        skb_reset_mac_len(skb);
 303
 304        hdr = eth_hdr(skb);
 305        ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
 306        ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
 307        hdr->h_proto = skb->protocol;
 308
 309        skb_postpush_rcsum(skb, hdr, ETH_HLEN);
 310
 311        /* safe right before invalidate_flow_key */
 312        key->mac_proto = MAC_PROTO_ETHERNET;
 313        invalidate_flow_key(key);
 314        return 0;
 315}
 316
 317static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
 318                    const struct nshhdr *nh)
 319{
 320        int err;
 321
 322        err = nsh_push(skb, nh);
 323        if (err)
 324                return err;
 325
 326        /* safe right before invalidate_flow_key */
 327        key->mac_proto = MAC_PROTO_NONE;
 328        invalidate_flow_key(key);
 329        return 0;
 330}
 331
 332static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
 333{
 334        int err;
 335
 336        err = nsh_pop(skb);
 337        if (err)
 338                return err;
 339
 340        /* safe right before invalidate_flow_key */
 341        if (skb->protocol == htons(ETH_P_TEB))
 342                key->mac_proto = MAC_PROTO_ETHERNET;
 343        else
 344                key->mac_proto = MAC_PROTO_NONE;
 345        invalidate_flow_key(key);
 346        return 0;
 347}
 348
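/* Fix up the TCP/UDP checksum after an IPv4 address rewrite.  Non-first
 * fragments carry no L4 header, and UDP datagrams with a zero checksum are
 * left untouched unless the checksum is still to be computed (CHECKSUM_PARTIAL).
 */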
 349static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
 350                                  __be32 addr, __be32 new_addr)
 351{
 352        int transport_len = skb->len - skb_transport_offset(skb);
 353
 354        if (nh->frag_off & htons(IP_OFFSET))
 355                return;
 356
 357        if (nh->protocol == IPPROTO_TCP) {
 358                if (likely(transport_len >= sizeof(struct tcphdr)))
 359                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
 360                                                 addr, new_addr, true);
 361        } else if (nh->protocol == IPPROTO_UDP) {
 362                if (likely(transport_len >= sizeof(struct udphdr))) {
 363                        struct udphdr *uh = udp_hdr(skb);
 364
 365                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 366                                inet_proto_csum_replace4(&uh->check, skb,
 367                                                         addr, new_addr, true);
 368                                if (!uh->check)
 369                                        uh->check = CSUM_MANGLED_0;
 370                        }
 371                }
 372        }
 373}
 374
 375static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 376                        __be32 *addr, __be32 new_addr)
 377{
 378        update_ip_l4_checksum(skb, nh, *addr, new_addr);
 379        csum_replace4(&nh->check, *addr, new_addr);
 380        skb_clear_hash(skb);
 381        *addr = new_addr;
 382}
 383
 384static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 385                                 __be32 addr[4], const __be32 new_addr[4])
 386{
 387        int transport_len = skb->len - skb_transport_offset(skb);
 388
 389        if (l4_proto == NEXTHDR_TCP) {
 390                if (likely(transport_len >= sizeof(struct tcphdr)))
 391                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
 392                                                  addr, new_addr, true);
 393        } else if (l4_proto == NEXTHDR_UDP) {
 394                if (likely(transport_len >= sizeof(struct udphdr))) {
 395                        struct udphdr *uh = udp_hdr(skb);
 396
 397                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 398                                inet_proto_csum_replace16(&uh->check, skb,
 399                                                          addr, new_addr, true);
 400                                if (!uh->check)
 401                                        uh->check = CSUM_MANGLED_0;
 402                        }
 403                }
 404        } else if (l4_proto == NEXTHDR_ICMP) {
 405                if (likely(transport_len >= sizeof(struct icmp6hdr)))
 406                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
 407                                                  skb, addr, new_addr, true);
 408        }
 409}
 410
 411static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 412                           const __be32 mask[4], __be32 masked[4])
 413{
 414        masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
 415        masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
 416        masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
 417        masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 418}
 419
 420static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 421                          __be32 addr[4], const __be32 new_addr[4],
 422                          bool recalculate_csum)
 423{
 424        if (recalculate_csum)
 425                update_ipv6_checksum(skb, l4_proto, addr, new_addr);
 426
 427        skb_clear_hash(skb);
 428        memcpy(addr, new_addr, sizeof(__be32[4]));
 429}
 430
 431static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 432{
 433        /* Bits 21-24 are always unmasked, so this retains their values. */
 434        OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
 435        OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
 436        OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 437}
 438
 439static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 440                       u8 mask)
 441{
 442        new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 443
 444        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 445        nh->ttl = new_ttl;
 446}
 447
 448static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 449                    const struct ovs_key_ipv4 *key,
 450                    const struct ovs_key_ipv4 *mask)
 451{
 452        struct iphdr *nh;
 453        __be32 new_addr;
 454        int err;
 455
 456        err = skb_ensure_writable(skb, skb_network_offset(skb) +
 457                                  sizeof(struct iphdr));
 458        if (unlikely(err))
 459                return err;
 460
 461        nh = ip_hdr(skb);
 462
  463        /* Setting an IP address is typically only a side effect of
 464         * matching on them in the current userspace implementation, so it
 465         * makes sense to check if the value actually changed.
 466         */
 467        if (mask->ipv4_src) {
 468                new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 469
 470                if (unlikely(new_addr != nh->saddr)) {
 471                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
 472                        flow_key->ipv4.addr.src = new_addr;
 473                }
 474        }
 475        if (mask->ipv4_dst) {
 476                new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 477
 478                if (unlikely(new_addr != nh->daddr)) {
 479                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
 480                        flow_key->ipv4.addr.dst = new_addr;
 481                }
 482        }
 483        if (mask->ipv4_tos) {
 484                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
 485                flow_key->ip.tos = nh->tos;
 486        }
 487        if (mask->ipv4_ttl) {
 488                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
 489                flow_key->ip.ttl = nh->ttl;
 490        }
 491
 492        return 0;
 493}
 494
 495static bool is_ipv6_mask_nonzero(const __be32 addr[4])
 496{
 497        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
 498}
 499
 500static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 501                    const struct ovs_key_ipv6 *key,
 502                    const struct ovs_key_ipv6 *mask)
 503{
 504        struct ipv6hdr *nh;
 505        int err;
 506
 507        err = skb_ensure_writable(skb, skb_network_offset(skb) +
 508                                  sizeof(struct ipv6hdr));
 509        if (unlikely(err))
 510                return err;
 511
 512        nh = ipv6_hdr(skb);
 513
  514        /* Setting an IP address is typically only a side effect of
 515         * matching on them in the current userspace implementation, so it
 516         * makes sense to check if the value actually changed.
 517         */
 518        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
 519                __be32 *saddr = (__be32 *)&nh->saddr;
 520                __be32 masked[4];
 521
 522                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 523
 524                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
 525                        set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 526                                      true);
 527                        memcpy(&flow_key->ipv6.addr.src, masked,
 528                               sizeof(flow_key->ipv6.addr.src));
 529                }
 530        }
 531        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
 532                unsigned int offset = 0;
 533                int flags = IP6_FH_F_SKIP_RH;
 534                bool recalc_csum = true;
 535                __be32 *daddr = (__be32 *)&nh->daddr;
 536                __be32 masked[4];
 537
 538                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
 539
 540                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
 541                        if (ipv6_ext_hdr(nh->nexthdr))
 542                                recalc_csum = (ipv6_find_hdr(skb, &offset,
 543                                                             NEXTHDR_ROUTING,
 544                                                             NULL, &flags)
 545                                               != NEXTHDR_ROUTING);
 546
 547                        set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 548                                      recalc_csum);
 549                        memcpy(&flow_key->ipv6.addr.dst, masked,
 550                               sizeof(flow_key->ipv6.addr.dst));
 551                }
 552        }
 553        if (mask->ipv6_tclass) {
 554                ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
 555                flow_key->ip.tos = ipv6_get_dsfield(nh);
 556        }
 557        if (mask->ipv6_label) {
 558                set_ipv6_fl(nh, ntohl(key->ipv6_label),
 559                            ntohl(mask->ipv6_label));
 560                flow_key->ipv6.label =
 561                    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 562        }
 563        if (mask->ipv6_hlimit) {
 564                OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
 565                               mask->ipv6_hlimit);
 566                flow_key->ip.ttl = nh->hop_limit;
 567        }
 568        return 0;
 569}
 570
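/* Apply a masked OVS_KEY_ATTR_NSH set action to the NSH header and mirror
 * the resulting values into the flow key.
 */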
 571static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
 572                   const struct nlattr *a)
 573{
 574        struct nshhdr *nh;
 575        size_t length;
 576        int err;
 577        u8 flags;
 578        u8 ttl;
 579        int i;
 580
 581        struct ovs_key_nsh key;
 582        struct ovs_key_nsh mask;
 583
 584        err = nsh_key_from_nlattr(a, &key, &mask);
 585        if (err)
 586                return err;
 587
 588        /* Make sure the NSH base header is there */
 589        if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
 590                return -ENOMEM;
 591
 592        nh = nsh_hdr(skb);
 593        length = nsh_hdr_len(nh);
 594
 595        /* Make sure the whole NSH header is there */
 596        err = skb_ensure_writable(skb, skb_network_offset(skb) +
 597                                       length);
 598        if (unlikely(err))
 599                return err;
 600
 601        nh = nsh_hdr(skb);
 602        skb_postpull_rcsum(skb, nh, length);
 603        flags = nsh_get_flags(nh);
 604        flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
 605        flow_key->nsh.base.flags = flags;
 606        ttl = nsh_get_ttl(nh);
 607        ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
 608        flow_key->nsh.base.ttl = ttl;
 609        nsh_set_flags_and_ttl(nh, flags, ttl);
 610        nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
 611                                  mask.base.path_hdr);
 612        flow_key->nsh.base.path_hdr = nh->path_hdr;
 613        switch (nh->mdtype) {
 614        case NSH_M_TYPE1:
 615                for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
 616                        nh->md1.context[i] =
 617                            OVS_MASKED(nh->md1.context[i], key.context[i],
 618                                       mask.context[i]);
 619                }
 620                memcpy(flow_key->nsh.context, nh->md1.context,
 621                       sizeof(nh->md1.context));
 622                break;
 623        case NSH_M_TYPE2:
 624                memset(flow_key->nsh.context, 0,
 625                       sizeof(flow_key->nsh.context));
 626                break;
 627        default:
 628                return -EINVAL;
 629        }
 630        skb_postpush_rcsum(skb, nh, length);
 631        return 0;
 632}
 633
 634/* Must follow skb_ensure_writable() since that can move the skb data. */
 635static void set_tp_port(struct sk_buff *skb, __be16 *port,
 636                        __be16 new_port, __sum16 *check)
 637{
 638        inet_proto_csum_replace2(check, skb, *port, new_port, false);
 639        *port = new_port;
 640}
 641
 642static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 643                   const struct ovs_key_udp *key,
 644                   const struct ovs_key_udp *mask)
 645{
 646        struct udphdr *uh;
 647        __be16 src, dst;
 648        int err;
 649
 650        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
 651                                  sizeof(struct udphdr));
 652        if (unlikely(err))
 653                return err;
 654
 655        uh = udp_hdr(skb);
 656        /* Either of the masks is non-zero, so do not bother checking them. */
 657        src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
 658        dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 659
 660        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 661                if (likely(src != uh->source)) {
 662                        set_tp_port(skb, &uh->source, src, &uh->check);
 663                        flow_key->tp.src = src;
 664                }
 665                if (likely(dst != uh->dest)) {
 666                        set_tp_port(skb, &uh->dest, dst, &uh->check);
 667                        flow_key->tp.dst = dst;
 668                }
 669
 670                if (unlikely(!uh->check))
 671                        uh->check = CSUM_MANGLED_0;
 672        } else {
 673                uh->source = src;
 674                uh->dest = dst;
 675                flow_key->tp.src = src;
 676                flow_key->tp.dst = dst;
 677        }
 678
 679        skb_clear_hash(skb);
 680
 681        return 0;
 682}
 683
 684static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 685                   const struct ovs_key_tcp *key,
 686                   const struct ovs_key_tcp *mask)
 687{
 688        struct tcphdr *th;
 689        __be16 src, dst;
 690        int err;
 691
 692        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
 693                                  sizeof(struct tcphdr));
 694        if (unlikely(err))
 695                return err;
 696
 697        th = tcp_hdr(skb);
 698        src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 699        if (likely(src != th->source)) {
 700                set_tp_port(skb, &th->source, src, &th->check);
 701                flow_key->tp.src = src;
 702        }
 703        dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 704        if (likely(dst != th->dest)) {
 705                set_tp_port(skb, &th->dest, dst, &th->check);
 706                flow_key->tp.dst = dst;
 707        }
 708        skb_clear_hash(skb);
 709
 710        return 0;
 711}
 712
 713static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 714                    const struct ovs_key_sctp *key,
 715                    const struct ovs_key_sctp *mask)
 716{
 717        unsigned int sctphoff = skb_transport_offset(skb);
 718        struct sctphdr *sh;
 719        __le32 old_correct_csum, new_csum, old_csum;
 720        int err;
 721
 722        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
 723        if (unlikely(err))
 724                return err;
 725
 726        sh = sctp_hdr(skb);
 727        old_csum = sh->checksum;
 728        old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 729
 730        sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
 731        sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 732
 733        new_csum = sctp_compute_cksum(skb, sctphoff);
 734
 735        /* Carry any checksum errors through. */
 736        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 737
 738        skb_clear_hash(skb);
 739        flow_key->tp.src = sh->source;
 740        flow_key->tp.dst = sh->dest;
 741
 742        return 0;
 743}
 744
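/* Output callback handed to the IPv4/IPv6 fragmentation code: restore the L2
 * header and skb metadata saved in ovs_frag_data_storage, then transmit the
 * fragment on the original vport.
 */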
 745static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 746{
 747        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
 748        struct vport *vport = data->vport;
 749
 750        if (skb_cow_head(skb, data->l2_len) < 0) {
 751                kfree_skb(skb);
 752                return -ENOMEM;
 753        }
 754
 755        __skb_dst_copy(skb, data->dst);
 756        *OVS_CB(skb) = data->cb;
 757        skb->inner_protocol = data->inner_protocol;
 758        if (data->vlan_tci & VLAN_CFI_MASK)
 759                __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
 760        else
 761                __vlan_hwaccel_clear_tag(skb);
 762
 763        /* Reconstruct the MAC header.  */
 764        skb_push(skb, data->l2_len);
 765        memcpy(skb->data, &data->l2_data, data->l2_len);
 766        skb_postpush_rcsum(skb, skb->data, data->l2_len);
 767        skb_reset_mac_header(skb);
 768
 769        if (eth_p_mpls(skb->protocol)) {
 770                skb->inner_network_header = skb->network_header;
 771                skb_set_network_header(skb, data->network_offset);
 772                skb_reset_mac_len(skb);
 773        }
 774
 775        ovs_vport_send(vport, skb, data->mac_proto);
 776        return 0;
 777}
 778
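/* Minimal dst_ops for the stack-allocated, non-refcounted dst that
 * ovs_fragment() attaches to the skb; the MTU is that of the egress device.
 */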
 779static unsigned int
 780ovs_dst_get_mtu(const struct dst_entry *dst)
 781{
 782        return dst->dev->mtu;
 783}
 784
 785static struct dst_ops ovs_dst_ops = {
 786        .family = AF_UNSPEC,
 787        .mtu = ovs_dst_get_mtu,
 788};
 789
  790/* prepare_frag() is called once per frame that needs fragmenting; its
  791 * counterpart, ovs_vport_output(), is called once for each resulting fragment.
 792 */
 793static void prepare_frag(struct vport *vport, struct sk_buff *skb,
 794                         u16 orig_network_offset, u8 mac_proto)
 795{
 796        unsigned int hlen = skb_network_offset(skb);
 797        struct ovs_frag_data *data;
 798
 799        data = this_cpu_ptr(&ovs_frag_data_storage);
 800        data->dst = skb->_skb_refdst;
 801        data->vport = vport;
 802        data->cb = *OVS_CB(skb);
 803        data->inner_protocol = skb->inner_protocol;
 804        data->network_offset = orig_network_offset;
 805        if (skb_vlan_tag_present(skb))
 806                data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
 807        else
 808                data->vlan_tci = 0;
 809        data->vlan_proto = skb->vlan_proto;
 810        data->mac_proto = mac_proto;
 811        data->l2_len = hlen;
 812        memcpy(&data->l2_data, skb->data, hlen);
 813
 814        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 815        skb_pull(skb, hlen);
 816}
 817
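/* Fragment 'skb' through the IPv4 or IPv6 stack with 'mru' as the maximum
 * fragment size, emitting each fragment via ovs_vport_output().  Packets of
 * any other ethertype, or with an over-long L2 header, are dropped.
 */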
 818static void ovs_fragment(struct net *net, struct vport *vport,
 819                         struct sk_buff *skb, u16 mru,
 820                         struct sw_flow_key *key)
 821{
 822        u16 orig_network_offset = 0;
 823
 824        if (eth_p_mpls(skb->protocol)) {
 825                orig_network_offset = skb_network_offset(skb);
 826                skb->network_header = skb->inner_network_header;
 827        }
 828
 829        if (skb_network_offset(skb) > MAX_L2_LEN) {
 830                OVS_NLERR(1, "L2 header too long to fragment");
 831                goto err;
 832        }
 833
 834        if (key->eth.type == htons(ETH_P_IP)) {
 835                struct dst_entry ovs_dst;
 836                unsigned long orig_dst;
 837
 838                prepare_frag(vport, skb, orig_network_offset,
 839                             ovs_key_mac_proto(key));
 840                dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
 841                         DST_OBSOLETE_NONE, DST_NOCOUNT);
 842                ovs_dst.dev = vport->dev;
 843
 844                orig_dst = skb->_skb_refdst;
 845                skb_dst_set_noref(skb, &ovs_dst);
 846                IPCB(skb)->frag_max_size = mru;
 847
 848                ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
 849                refdst_drop(orig_dst);
 850        } else if (key->eth.type == htons(ETH_P_IPV6)) {
 851                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
 852                unsigned long orig_dst;
 853                struct rt6_info ovs_rt;
 854
 855                if (!v6ops)
 856                        goto err;
 857
 858                prepare_frag(vport, skb, orig_network_offset,
 859                             ovs_key_mac_proto(key));
 860                memset(&ovs_rt, 0, sizeof(ovs_rt));
 861                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
 862                         DST_OBSOLETE_NONE, DST_NOCOUNT);
 863                ovs_rt.dst.dev = vport->dev;
 864
 865                orig_dst = skb->_skb_refdst;
 866                skb_dst_set_noref(skb, &ovs_rt.dst);
 867                IP6CB(skb)->frag_max_size = mru;
 868
 869                v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
 870                refdst_drop(orig_dst);
 871        } else {
 872                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
 873                          ovs_vport_name(vport), ntohs(key->eth.type), mru,
 874                          vport->dev->mtu);
 875                goto err;
 876        }
 877
 878        return;
 879err:
 880        kfree_skb(skb);
 881}
 882
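/* Send 'skb' out of 'out_port': trim it if a truncate action requested a
 * cutlen, and fragment it when it exceeds the MRU recorded in the OVS
 * control block and the MRU does not exceed the device MTU; otherwise drop it.
 */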
 883static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
 884                      struct sw_flow_key *key)
 885{
 886        struct vport *vport = ovs_vport_rcu(dp, out_port);
 887
 888        if (likely(vport)) {
 889                u16 mru = OVS_CB(skb)->mru;
 890                u32 cutlen = OVS_CB(skb)->cutlen;
 891
 892                if (unlikely(cutlen > 0)) {
 893                        if (skb->len - cutlen > ovs_mac_header_len(key))
 894                                pskb_trim(skb, skb->len - cutlen);
 895                        else
 896                                pskb_trim(skb, ovs_mac_header_len(key));
 897                }
 898
 899                if (likely(!mru ||
 900                           (skb->len <= mru + vport->dev->hard_header_len))) {
 901                        ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
 902                } else if (mru <= vport->dev->mtu) {
 903                        struct net *net = read_pnet(&dp->net);
 904
 905                        ovs_fragment(net, vport, skb, mru, key);
 906                } else {
 907                        kfree_skb(skb);
 908                }
 909        } else {
 910                kfree_skb(skb);
 911        }
 912}
 913
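/* Build an OVS_PACKET_CMD_ACTION upcall from the OVS_ACTION_ATTR_USERSPACE
 * sub-attributes (netlink portid, userdata, optional egress tunnel info and
 * the action list) and deliver the packet to userspace.
 */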
 914static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 915                            struct sw_flow_key *key, const struct nlattr *attr,
 916                            const struct nlattr *actions, int actions_len,
 917                            uint32_t cutlen)
 918{
 919        struct dp_upcall_info upcall;
 920        const struct nlattr *a;
 921        int rem;
 922
 923        memset(&upcall, 0, sizeof(upcall));
 924        upcall.cmd = OVS_PACKET_CMD_ACTION;
 925        upcall.mru = OVS_CB(skb)->mru;
 926
 927        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 928                 a = nla_next(a, &rem)) {
 929                switch (nla_type(a)) {
 930                case OVS_USERSPACE_ATTR_USERDATA:
 931                        upcall.userdata = a;
 932                        break;
 933
 934                case OVS_USERSPACE_ATTR_PID:
 935                        upcall.portid = nla_get_u32(a);
 936                        break;
 937
 938                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
  939                        /* Get egress tunnel info. */
 940                        struct vport *vport;
 941
 942                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
 943                        if (vport) {
 944                                int err;
 945
 946                                err = dev_fill_metadata_dst(vport->dev, skb);
 947                                if (!err)
 948                                        upcall.egress_tun_info = skb_tunnel_info(skb);
 949                        }
 950
 951                        break;
 952                }
 953
 954                case OVS_USERSPACE_ATTR_ACTIONS: {
 955                        /* Include actions. */
 956                        upcall.actions = actions;
 957                        upcall.actions_len = actions_len;
 958                        break;
 959                }
 960
 961                } /* End of switch. */
 962        }
 963
 964        return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
 965}
 966
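/* Called when execute_dec_ttl() reports an expired TTL: run the nested
 * exception actions if any were supplied, otherwise consume the packet.
 */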
 967static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
 968                                     struct sw_flow_key *key,
 969                                     const struct nlattr *attr, bool last)
 970{
 971        /* The first action is always 'OVS_DEC_TTL_ATTR_ARG'. */
 972        struct nlattr *dec_ttl_arg = nla_data(attr);
 973        int rem = nla_len(attr);
 974
 975        if (nla_len(dec_ttl_arg)) {
 976                struct nlattr *actions = nla_next(dec_ttl_arg, &rem);
 977
 978                if (actions)
 979                        return clone_execute(dp, skb, key, 0, actions, rem,
 980                                             last, false);
 981        }
 982        consume_skb(skb);
 983        return 0;
 984}
 985
 986/* When 'last' is true, sample() should always consume the 'skb'.
  987 * Otherwise, sample() should keep 'skb' intact regardless of what
 988 * actions are executed within sample().
 989 */
 990static int sample(struct datapath *dp, struct sk_buff *skb,
 991                  struct sw_flow_key *key, const struct nlattr *attr,
 992                  bool last)
 993{
 994        struct nlattr *actions;
 995        struct nlattr *sample_arg;
 996        int rem = nla_len(attr);
 997        const struct sample_arg *arg;
 998        bool clone_flow_key;
 999
1000        /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
1001        sample_arg = nla_data(attr);
1002        arg = nla_data(sample_arg);
1003        actions = nla_next(sample_arg, &rem);
1004
1005        if ((arg->probability != U32_MAX) &&
1006            (!arg->probability || prandom_u32() > arg->probability)) {
1007                if (last)
1008                        consume_skb(skb);
1009                return 0;
1010        }
1011
1012        clone_flow_key = !arg->exec;
1013        return clone_execute(dp, skb, key, 0, actions, rem, last,
1014                             clone_flow_key);
1015}
1016
1017/* When 'last' is true, clone() should always consume the 'skb'.
 1018 * Otherwise, clone() should keep 'skb' intact regardless of what
1019 * actions are executed within clone().
1020 */
1021static int clone(struct datapath *dp, struct sk_buff *skb,
1022                 struct sw_flow_key *key, const struct nlattr *attr,
1023                 bool last)
1024{
1025        struct nlattr *actions;
1026        struct nlattr *clone_arg;
1027        int rem = nla_len(attr);
1028        bool dont_clone_flow_key;
1029
1030        /* The first action is always 'OVS_CLONE_ATTR_ARG'. */
1031        clone_arg = nla_data(attr);
1032        dont_clone_flow_key = nla_get_u32(clone_arg);
1033        actions = nla_next(clone_arg, &rem);
1034
1035        return clone_execute(dp, skb, key, 0, actions, rem, last,
1036                             !dont_clone_flow_key);
1037}
1038
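/* OVS_ACTION_ATTR_HASH: compute the skb's L4 hash, mix in the requested
 * basis, and store the (never zero) result in the flow key's ovs_flow_hash.
 */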
1039static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
1040                         const struct nlattr *attr)
1041{
1042        struct ovs_action_hash *hash_act = nla_data(attr);
1043        u32 hash = 0;
1044
1045        /* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
1046        hash = skb_get_hash(skb);
1047        hash = jhash_1word(hash, hash_act->hash_basis);
1048        if (!hash)
1049                hash = 0x1;
1050
1051        key->ovs_flow_hash = hash;
1052}
1053
1054static int execute_set_action(struct sk_buff *skb,
1055                              struct sw_flow_key *flow_key,
1056                              const struct nlattr *a)
1057{
1058        /* Only tunnel set execution is supported without a mask. */
1059        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
1060                struct ovs_tunnel_info *tun = nla_data(a);
1061
1062                skb_dst_drop(skb);
1063                dst_hold((struct dst_entry *)tun->tun_dst);
1064                skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
1065                return 0;
1066        }
1067
1068        return -EINVAL;
1069}
1070
1071/* Mask is at the midpoint of the data. */
1072#define get_mask(a, type) ((const type)nla_data(a) + 1)
1073
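/* Apply a masked set-field action.  The attribute payload holds the value
 * followed by an equally sized mask, e.g. two struct ovs_key_ipv4 for
 * OVS_KEY_ATTR_IPV4; see get_mask() above.
 */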
1074static int execute_masked_set_action(struct sk_buff *skb,
1075                                     struct sw_flow_key *flow_key,
1076                                     const struct nlattr *a)
1077{
1078        int err = 0;
1079
1080        switch (nla_type(a)) {
1081        case OVS_KEY_ATTR_PRIORITY:
1082                OVS_SET_MASKED(skb->priority, nla_get_u32(a),
1083                               *get_mask(a, u32 *));
1084                flow_key->phy.priority = skb->priority;
1085                break;
1086
1087        case OVS_KEY_ATTR_SKB_MARK:
1088                OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
1089                flow_key->phy.skb_mark = skb->mark;
1090                break;
1091
1092        case OVS_KEY_ATTR_TUNNEL_INFO:
1093                /* Masked data not supported for tunnel. */
1094                err = -EINVAL;
1095                break;
1096
1097        case OVS_KEY_ATTR_ETHERNET:
1098                err = set_eth_addr(skb, flow_key, nla_data(a),
1099                                   get_mask(a, struct ovs_key_ethernet *));
1100                break;
1101
1102        case OVS_KEY_ATTR_NSH:
1103                err = set_nsh(skb, flow_key, a);
1104                break;
1105
1106        case OVS_KEY_ATTR_IPV4:
1107                err = set_ipv4(skb, flow_key, nla_data(a),
1108                               get_mask(a, struct ovs_key_ipv4 *));
1109                break;
1110
1111        case OVS_KEY_ATTR_IPV6:
1112                err = set_ipv6(skb, flow_key, nla_data(a),
1113                               get_mask(a, struct ovs_key_ipv6 *));
1114                break;
1115
1116        case OVS_KEY_ATTR_TCP:
1117                err = set_tcp(skb, flow_key, nla_data(a),
1118                              get_mask(a, struct ovs_key_tcp *));
1119                break;
1120
1121        case OVS_KEY_ATTR_UDP:
1122                err = set_udp(skb, flow_key, nla_data(a),
1123                              get_mask(a, struct ovs_key_udp *));
1124                break;
1125
1126        case OVS_KEY_ATTR_SCTP:
1127                err = set_sctp(skb, flow_key, nla_data(a),
1128                               get_mask(a, struct ovs_key_sctp *));
1129                break;
1130
1131        case OVS_KEY_ATTR_MPLS:
1132                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
1133                                                                    __be32 *));
1134                break;
1135
1136        case OVS_KEY_ATTR_CT_STATE:
1137        case OVS_KEY_ATTR_CT_ZONE:
1138        case OVS_KEY_ATTR_CT_MARK:
1139        case OVS_KEY_ATTR_CT_LABELS:
1140        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
1141        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
1142                err = -EINVAL;
1143                break;
1144        }
1145
1146        return err;
1147}
1148
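/* OVS_ACTION_ATTR_RECIRC: make sure the flow key is up to date, then feed
 * the packet back into datapath processing with the given recirculation id.
 */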
1149static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
1150                          struct sw_flow_key *key,
1151                          const struct nlattr *a, bool last)
1152{
1153        u32 recirc_id;
1154
1155        if (!is_flow_key_valid(key)) {
1156                int err;
1157
1158                err = ovs_flow_key_update(skb, key);
1159                if (err)
1160                        return err;
1161        }
1162        BUG_ON(!is_flow_key_valid(key));
1163
1164        recirc_id = nla_get_u32(a);
1165        return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
1166}
1167
1168static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
1169                                 struct sw_flow_key *key,
1170                                 const struct nlattr *attr, bool last)
1171{
1172        struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
1173        const struct nlattr *actions, *cpl_arg;
1174        int len, max_len, rem = nla_len(attr);
1175        const struct check_pkt_len_arg *arg;
1176        bool clone_flow_key;
1177
1178        /* The first netlink attribute in 'attr' is always
1179         * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
1180         */
1181        cpl_arg = nla_data(attr);
1182        arg = nla_data(cpl_arg);
1183
1184        len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
1185        max_len = arg->pkt_len;
1186
1187        if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
1188            len <= max_len) {
1189                /* Second netlink attribute in 'attr' is always
1190                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
1191                 */
1192                actions = nla_next(cpl_arg, &rem);
1193                clone_flow_key = !arg->exec_for_lesser_equal;
1194        } else {
1195                /* Third netlink attribute in 'attr' is always
1196                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
1197                 */
1198                actions = nla_next(cpl_arg, &rem);
1199                actions = nla_next(actions, &rem);
1200                clone_flow_key = !arg->exec_for_greater;
1201        }
1202
1203        return clone_execute(dp, skb, key, 0, nla_data(actions),
1204                             nla_len(actions), last, clone_flow_key);
1205}
1206
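/* Decrement the IPv4 TTL or IPv6 hop limit in place (updating the IPv4
 * header checksum).  Returns -EHOSTUNREACH when the TTL would reach zero,
 * so the caller can run the dec_ttl exception actions instead.
 */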
1207static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
1208{
1209        int err;
1210
1211        if (skb->protocol == htons(ETH_P_IPV6)) {
1212                struct ipv6hdr *nh;
1213
1214                err = skb_ensure_writable(skb, skb_network_offset(skb) +
1215                                          sizeof(*nh));
1216                if (unlikely(err))
1217                        return err;
1218
1219                nh = ipv6_hdr(skb);
1220
1221                if (nh->hop_limit <= 1)
1222                        return -EHOSTUNREACH;
1223
1224                key->ip.ttl = --nh->hop_limit;
1225        } else {
1226                struct iphdr *nh;
1227                u8 old_ttl;
1228
1229                err = skb_ensure_writable(skb, skb_network_offset(skb) +
1230                                          sizeof(*nh));
1231                if (unlikely(err))
1232                        return err;
1233
1234                nh = ip_hdr(skb);
1235                if (nh->ttl <= 1)
1236                        return -EHOSTUNREACH;
1237
1238                old_ttl = nh->ttl--;
1239                csum_replace2(&nh->check, htons(old_ttl << 8),
1240                              htons(nh->ttl << 8));
1241                key->ip.ttl = nh->ttl;
1242        }
1243        return 0;
1244}
1245
1246/* Execute a list of actions against 'skb'. */
1247static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1248                              struct sw_flow_key *key,
1249                              const struct nlattr *attr, int len)
1250{
1251        const struct nlattr *a;
1252        int rem;
1253
1254        for (a = attr, rem = len; rem > 0;
1255             a = nla_next(a, &rem)) {
1256                int err = 0;
1257
1258                switch (nla_type(a)) {
1259                case OVS_ACTION_ATTR_OUTPUT: {
1260                        int port = nla_get_u32(a);
1261                        struct sk_buff *clone;
1262
1263                        /* Every output action needs a separate clone
 1264                         * of 'skb'. If the output action is the
1265                         * last action, cloning can be avoided.
1266                         */
1267                        if (nla_is_last(a, rem)) {
1268                                do_output(dp, skb, port, key);
1269                                /* 'skb' has been used for output.
1270                                 */
1271                                return 0;
1272                        }
1273
1274                        clone = skb_clone(skb, GFP_ATOMIC);
1275                        if (clone)
1276                                do_output(dp, clone, port, key);
1277                        OVS_CB(skb)->cutlen = 0;
1278                        break;
1279                }
1280
1281                case OVS_ACTION_ATTR_TRUNC: {
1282                        struct ovs_action_trunc *trunc = nla_data(a);
1283
1284                        if (skb->len > trunc->max_len)
1285                                OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1286                        break;
1287                }
1288
1289                case OVS_ACTION_ATTR_USERSPACE:
1290                        output_userspace(dp, skb, key, a, attr,
1291                                                     len, OVS_CB(skb)->cutlen);
1292                        OVS_CB(skb)->cutlen = 0;
1293                        break;
1294
1295                case OVS_ACTION_ATTR_HASH:
1296                        execute_hash(skb, key, a);
1297                        break;
1298
1299                case OVS_ACTION_ATTR_PUSH_MPLS: {
1300                        struct ovs_action_push_mpls *mpls = nla_data(a);
1301
1302                        err = push_mpls(skb, key, mpls->mpls_lse,
1303                                        mpls->mpls_ethertype, skb->mac_len);
1304                        break;
1305                }
1306                case OVS_ACTION_ATTR_ADD_MPLS: {
1307                        struct ovs_action_add_mpls *mpls = nla_data(a);
1308                        __u16 mac_len = 0;
1309
1310                        if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
1311                                mac_len = skb->mac_len;
1312
1313                        err = push_mpls(skb, key, mpls->mpls_lse,
1314                                        mpls->mpls_ethertype, mac_len);
1315                        break;
1316                }
1317                case OVS_ACTION_ATTR_POP_MPLS:
1318                        err = pop_mpls(skb, key, nla_get_be16(a));
1319                        break;
1320
1321                case OVS_ACTION_ATTR_PUSH_VLAN:
1322                        err = push_vlan(skb, key, nla_data(a));
1323                        break;
1324
1325                case OVS_ACTION_ATTR_POP_VLAN:
1326                        err = pop_vlan(skb, key);
1327                        break;
1328
1329                case OVS_ACTION_ATTR_RECIRC: {
1330                        bool last = nla_is_last(a, rem);
1331
1332                        err = execute_recirc(dp, skb, key, a, last);
1333                        if (last) {
1334                                /* If this is the last action, the skb has
1335                                 * been consumed or freed.
1336                                 * Return immediately.
1337                                 */
1338                                return err;
1339                        }
1340                        break;
1341                }
1342
1343                case OVS_ACTION_ATTR_SET:
1344                        err = execute_set_action(skb, key, nla_data(a));
1345                        break;
1346
1347                case OVS_ACTION_ATTR_SET_MASKED:
1348                case OVS_ACTION_ATTR_SET_TO_MASKED:
1349                        err = execute_masked_set_action(skb, key, nla_data(a));
1350                        break;
1351
1352                case OVS_ACTION_ATTR_SAMPLE: {
1353                        bool last = nla_is_last(a, rem);
1354
1355                        err = sample(dp, skb, key, a, last);
1356                        if (last)
1357                                return err;
1358
1359                        break;
1360                }
1361
1362                case OVS_ACTION_ATTR_CT:
1363                        if (!is_flow_key_valid(key)) {
1364                                err = ovs_flow_key_update(skb, key);
1365                                if (err)
1366                                        return err;
1367                        }
1368
1369                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
1370                                             nla_data(a));
1371
1372                        /* Hide stolen IP fragments from user space. */
1373                        if (err)
1374                                return err == -EINPROGRESS ? 0 : err;
1375                        break;
1376
1377                case OVS_ACTION_ATTR_CT_CLEAR:
1378                        err = ovs_ct_clear(skb, key);
1379                        break;
1380
1381                case OVS_ACTION_ATTR_PUSH_ETH:
1382                        err = push_eth(skb, key, nla_data(a));
1383                        break;
1384
1385                case OVS_ACTION_ATTR_POP_ETH:
1386                        err = pop_eth(skb, key);
1387                        break;
1388
1389                case OVS_ACTION_ATTR_PUSH_NSH: {
1390                        u8 buffer[NSH_HDR_MAX_LEN];
1391                        struct nshhdr *nh = (struct nshhdr *)buffer;
1392
1393                        err = nsh_hdr_from_nlattr(nla_data(a), nh,
1394                                                  NSH_HDR_MAX_LEN);
1395                        if (unlikely(err))
1396                                break;
1397                        err = push_nsh(skb, key, nh);
1398                        break;
1399                }
1400
1401                case OVS_ACTION_ATTR_POP_NSH:
1402                        err = pop_nsh(skb, key);
1403                        break;
1404
1405                case OVS_ACTION_ATTR_METER:
1406                        if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
1407                                consume_skb(skb);
1408                                return 0;
1409                        }
1410                        break;
1411
1412                case OVS_ACTION_ATTR_CLONE: {
1413                        bool last = nla_is_last(a, rem);
1414
1415                        err = clone(dp, skb, key, a, last);
1416                        if (last)
1417                                return err;
1418
1419                        break;
1420                }
1421
1422                case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
1423                        bool last = nla_is_last(a, rem);
1424
1425                        err = execute_check_pkt_len(dp, skb, key, a, last);
1426                        if (last)
1427                                return err;
1428
1429                        break;
1430                }
1431
1432                case OVS_ACTION_ATTR_DEC_TTL:
1433                        err = execute_dec_ttl(skb, key);
1434                        if (err == -EHOSTUNREACH) {
1435                                err = dec_ttl_exception_handler(dp, skb, key,
1436                                                                a, true);
1437                                return err;
1438                        }
1439                        break;
1440                }
1441
1442                if (unlikely(err)) {
1443                        kfree_skb(skb);
1444                        return err;
1445                }
1446        }
1447
1448        consume_skb(skb);
1449        return 0;
1450}
1451
 1452/* Execute the actions on a clone of the packet.  The execution leaves the
 1453 * original 'skb' and the original 'key' untouched.
 1454 *
 1455 * The execution may be deferred in case the actions cannot be executed
 1456 * immediately.
1457 */
1458static int clone_execute(struct datapath *dp, struct sk_buff *skb,
1459                         struct sw_flow_key *key, u32 recirc_id,
1460                         const struct nlattr *actions, int len,
1461                         bool last, bool clone_flow_key)
1462{
1463        struct deferred_action *da;
1464        struct sw_flow_key *clone;
1465
1466        skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
1467        if (!skb) {
1468                /* Out of memory, skip this action.
1469                 */
1470                return 0;
1471        }
1472
 1473        /* When clone_flow_key is false, the 'key' will not be changed
 1474         * by the actions, so the 'key' can be used directly.
1475         * Otherwise, try to clone key from the next recursion level of
1476         * 'flow_keys'. If clone is successful, execute the actions
1477         * without deferring.
1478         */
1479        clone = clone_flow_key ? clone_key(key) : key;
1480        if (clone) {
1481                int err = 0;
1482
1483                if (actions) { /* Sample action */
1484                        if (clone_flow_key)
1485                                __this_cpu_inc(exec_actions_level);
1486
1487                        err = do_execute_actions(dp, skb, clone,
1488                                                 actions, len);
1489
1490                        if (clone_flow_key)
1491                                __this_cpu_dec(exec_actions_level);
1492                } else { /* Recirc action */
1493                        clone->recirc_id = recirc_id;
1494                        ovs_dp_process_packet(skb, clone);
1495                }
1496                return err;
1497        }
1498
1499        /* Out of 'flow_keys' space. Defer actions */
1500        da = add_deferred_actions(skb, key, actions, len);
1501        if (da) {
1502                if (!actions) { /* Recirc action */
1503                        key = &da->pkt_key;
1504                        key->recirc_id = recirc_id;
1505                }
1506        } else {
 1507                /* Out of per-CPU action FIFO space. Drop the 'skb' and
1508                 * log an error.
1509                 */
1510                kfree_skb(skb);
1511
1512                if (net_ratelimit()) {
1513                        if (actions) { /* Sample action */
1514                                pr_warn("%s: deferred action limit reached, drop sample action\n",
1515                                        ovs_dp_name(dp));
1516                        } else {  /* Recirc action */
1517                                pr_warn("%s: deferred action limit reached, drop recirc action\n",
1518                                        ovs_dp_name(dp));
1519                        }
1520                }
1521        }
1522        return 0;
1523}
1524
1525static void process_deferred_actions(struct datapath *dp)
1526{
1527        struct action_fifo *fifo = this_cpu_ptr(action_fifos);
1528
 1529        /* Do not touch the FIFO in case there are no deferred actions. */
1530        if (action_fifo_is_empty(fifo))
1531                return;
1532
 1533        /* Finish executing all deferred actions. */
1534        do {
1535                struct deferred_action *da = action_fifo_get(fifo);
1536                struct sk_buff *skb = da->skb;
1537                struct sw_flow_key *key = &da->pkt_key;
1538                const struct nlattr *actions = da->actions;
1539                int actions_len = da->actions_len;
1540
1541                if (actions)
1542                        do_execute_actions(dp, skb, key, actions, actions_len);
1543                else
1544                        ovs_dp_process_packet(skb, key);
1545        } while (!action_fifo_is_empty(fifo));
1546
1547        /* Reset FIFO for the next packet.  */
1548        action_fifo_init(fifo);
1549}
1550
1551/* Execute a list of actions against 'skb'. */
1552int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
1553                        const struct sw_flow_actions *acts,
1554                        struct sw_flow_key *key)
1555{
1556        int err, level;
1557
1558        level = __this_cpu_inc_return(exec_actions_level);
1559        if (unlikely(level > OVS_RECURSION_LIMIT)) {
1560                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
1561                                     ovs_dp_name(dp));
1562                kfree_skb(skb);
1563                err = -ENETDOWN;
1564                goto out;
1565        }
1566
1567        OVS_CB(skb)->acts_origlen = acts->orig_len;
1568        err = do_execute_actions(dp, skb, key,
1569                                 acts->actions, acts->actions_len);
1570
1571        if (level == 1)
1572                process_deferred_actions(dp);
1573
1574out:
1575        __this_cpu_dec(exec_actions_level);
1576        return err;
1577}
1578
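/* Allocate the per-CPU action FIFOs and flow-key clone storage used by
 * ovs_execute_actions(); action_fifos_exit() releases them again.
 */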
1579int action_fifos_init(void)
1580{
1581        action_fifos = alloc_percpu(struct action_fifo);
1582        if (!action_fifos)
1583                return -ENOMEM;
1584
1585        flow_keys = alloc_percpu(struct action_flow_keys);
1586        if (!flow_keys) {
1587                free_percpu(action_fifos);
1588                return -ENOMEM;
1589        }
1590
1591        return 0;
1592}
1593
1594void action_fifos_exit(void)
1595{
1596        free_percpu(action_fifos);
1597        free_percpu(flow_keys);
1598}
1599