qemu/hw/net/rocker/rocker_of_dpa.c
   1/*
   2 * QEMU rocker switch emulation - OF-DPA flow processing support
   3 *
   4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 * GNU General Public License for more details.
  15 */
  16
  17#include "qemu/osdep.h"
  18#include "net/eth.h"
  19#include "qapi/error.h"
  20#include "qapi/qapi-commands-rocker.h"
  21#include "qemu/iov.h"
  22#include "qemu/timer.h"
  23
  24#include "rocker.h"
  25#include "rocker_hw.h"
  26#include "rocker_fp.h"
  27#include "rocker_tlv.h"
  28#include "rocker_world.h"
  29#include "rocker_desc.h"
  30#include "rocker_of_dpa.h"
  31
  32static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
  33static const MACAddr ff_mac =   { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
  34
  35typedef struct of_dpa {
  36    World *world;
  37    GHashTable *flow_tbl;
  38    GHashTable *group_tbl;
  39    unsigned int flow_tbl_max_size;
  40    unsigned int group_tbl_max_size;
  41} OfDpa;
  42
  43/* flow_key stolen mostly from OVS
  44 *
  45 * Note: fields that compare with network packet header fields
  46 * are stored in network order (BE) to avoid per-packet field
  47 * byte-swaps.
  48 */
  49
  50typedef struct of_dpa_flow_key {
  51    uint32_t in_pport;               /* ingress port */
  52    uint32_t tunnel_id;              /* overlay tunnel id */
  53    uint32_t tbl_id;                 /* table id */
  54    struct {
  55        __be16 vlan_id;              /* 0 if no VLAN */
  56        MACAddr src;                 /* ethernet source address */
  57        MACAddr dst;                 /* ethernet destination address */
  58        __be16 type;                 /* ethernet frame type */
  59    } eth;
  60    struct {
  61        uint8_t proto;               /* IP protocol or ARP opcode */
  62        uint8_t tos;                 /* IP ToS */
  63        uint8_t ttl;                 /* IP TTL/hop limit */
  64        uint8_t frag;                /* one of FRAG_TYPE_* */
  65    } ip;
  66    union {
  67        struct {
  68            struct {
  69                __be32 src;          /* IP source address */
  70                __be32 dst;          /* IP destination address */
  71            } addr;
  72            union {
  73                struct {
  74                    __be16 src;      /* TCP/UDP/SCTP source port */
  75                    __be16 dst;      /* TCP/UDP/SCTP destination port */
  76                    __be16 flags;    /* TCP flags */
  77                } tp;
  78                struct {
  79                    MACAddr sha;     /* ARP source hardware address */
  80                    MACAddr tha;     /* ARP target hardware address */
  81                } arp;
  82            };
  83        } ipv4;
  84        struct {
  85            struct {
  86                Ipv6Addr src;       /* IPv6 source address */
  87                Ipv6Addr dst;       /* IPv6 destination address */
  88            } addr;
  89            __be32 label;            /* IPv6 flow label */
  90            struct {
  91                __be16 src;          /* TCP/UDP/SCTP source port */
  92                __be16 dst;          /* TCP/UDP/SCTP destination port */
  93                __be16 flags;        /* TCP flags */
  94            } tp;
  95            struct {
  96                Ipv6Addr target;    /* ND target address */
  97                MACAddr sll;         /* ND source link layer address */
  98                MACAddr tll;         /* ND target link layer address */
  99            } nd;
 100        } ipv6;
 101    };
 102    int width;                       /* how many uint64_t's in key? */
 103} OfDpaFlowKey;
 104
 105/* Width of key which includes field 'f' in u64s, rounded up */
 106#define FLOW_KEY_WIDTH(f) \
 107    DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof_field(OfDpaFlowKey, f), \
 108    sizeof(uint64_t))
 109
 110typedef struct of_dpa_flow_action {
 111    uint32_t goto_tbl;
 112    struct {
 113        uint32_t group_id;
 114        uint32_t tun_log_lport;
 115        __be16 vlan_id;
 116    } write;
 117    struct {
 118        __be16 new_vlan_id;
 119        uint32_t out_pport;
 120        uint8_t copy_to_cpu;
 121        __be16 vlan_id;
 122    } apply;
 123} OfDpaFlowAction;
 124
 125typedef struct of_dpa_flow {
 126    uint32_t lpm;
 127    uint32_t priority;
 128    uint32_t hardtime;
 129    uint32_t idletime;
 130    uint64_t cookie;
 131    OfDpaFlowKey key;
 132    OfDpaFlowKey mask;
 133    OfDpaFlowAction action;
 134    struct {
 135        uint64_t hits;
 136        int64_t install_time;
 137        int64_t refresh_time;
 138        uint64_t rx_pkts;
 139        uint64_t tx_pkts;
 140    } stats;
 141} OfDpaFlow;
 142
 143typedef struct of_dpa_flow_pkt_fields {
 144    uint32_t tunnel_id;
 145    struct eth_header *ethhdr;
 146    __be16 *h_proto;
 147    struct vlan_header *vlanhdr;
 148    struct ip_header *ipv4hdr;
 149    struct ip6_header *ipv6hdr;
 150    Ipv6Addr *ipv6_src_addr;
 151    Ipv6Addr *ipv6_dst_addr;
 152} OfDpaFlowPktFields;
 153
 154typedef struct of_dpa_flow_context {
 155    uint32_t in_pport;
 156    uint32_t tunnel_id;
 157    struct iovec *iov;
 158    int iovcnt;
 159    struct eth_header ethhdr_rewrite;
 160    struct vlan_header vlanhdr_rewrite;
 161    struct vlan_header vlanhdr;
 162    OfDpa *of_dpa;
 163    OfDpaFlowPktFields fields;
 164    OfDpaFlowAction action_set;
 165} OfDpaFlowContext;
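/*
 * One OfDpaFlowContext is built per received packet (see of_dpa_ig()) and
 * carried through the table pipeline.  ethhdr_rewrite and vlanhdr_rewrite
 * are scratch headers: rewrites repoint iov[0]/iov[1] at these copies, so
 * the original receive buffers are left untouched.
 */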
 166
 167typedef struct of_dpa_flow_match {
 168    OfDpaFlowKey value;
 169    OfDpaFlow *best;
 170} OfDpaFlowMatch;
 171
 172typedef struct of_dpa_group {
 173    uint32_t id;
 174    union {
 175        struct {
 176            uint32_t out_pport;
 177            uint8_t pop_vlan;
 178        } l2_interface;
 179        struct {
 180            uint32_t group_id;
 181            MACAddr src_mac;
 182            MACAddr dst_mac;
 183            __be16 vlan_id;
 184        } l2_rewrite;
 185        struct {
 186            uint16_t group_count;
 187            uint32_t *group_ids;
 188        } l2_flood;
 189        struct {
 190            uint32_t group_id;
 191            MACAddr src_mac;
 192            MACAddr dst_mac;
 193            __be16 vlan_id;
 194            uint8_t ttl_check;
 195        } l3_unicast;
 196    };
 197} OfDpaGroup;
 198
 199static int of_dpa_mask2prefix(__be32 mask)
 200{
 201    int i;
 202    int count = 32;
 203
 204    for (i = 0; i < 32; i++) {
 205        if (!(ntohl(mask) & ((2 << i) - 1))) {
 206            count--;
 207        }
 208    }
 209
 210    return count;
 211}
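/*
 * of_dpa_mask2prefix() converts a big-endian IPv4 netmask into a CIDR
 * prefix length: the count drops by one for every bit position i whose
 * low-order (i + 1) bits are all clear, e.g. htonl(0xffffff00) -> 24 and
 * 0 -> 0.
 */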
 212
 213#if defined(DEBUG_ROCKER)
 214static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
 215{
 216    char buf[512], *b = buf, *mac;
 217
 218    b += sprintf(b, " tbl %2d", key->tbl_id);
 219
 220    if (key->in_pport || (mask && mask->in_pport)) {
 221        b += sprintf(b, " in_pport %2d", key->in_pport);
 222        if (mask && mask->in_pport != 0xffffffff) {
  223            b += sprintf(b, "/0x%08x", mask->in_pport);
 224        }
 225    }
 226
 227    if (key->tunnel_id || (mask && mask->tunnel_id)) {
 228        b += sprintf(b, " tun %8d", key->tunnel_id);
 229        if (mask && mask->tunnel_id != 0xffffffff) {
  230            b += sprintf(b, "/0x%08x", mask->tunnel_id);
 231        }
 232    }
 233
 234    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
 235        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
 236        if (mask && mask->eth.vlan_id != 0xffff) {
  237            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
 238        }
 239    }
 240
 241    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
 242        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
 243        mac = qemu_mac_strdup_printf(key->eth.src.a);
 244        b += sprintf(b, " src %s", mac);
 245        g_free(mac);
 246        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
 247            mac = qemu_mac_strdup_printf(mask->eth.src.a);
 248            b += sprintf(b, "/%s", mac);
 249            g_free(mac);
 250        }
 251    }
 252
 253    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
 254        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
 255        mac = qemu_mac_strdup_printf(key->eth.dst.a);
 256        b += sprintf(b, " dst %s", mac);
 257        g_free(mac);
 258        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
 259            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
 260            b += sprintf(b, "/%s", mac);
 261            g_free(mac);
 262        }
 263    }
 264
 265    if (key->eth.type || (mask && mask->eth.type)) {
 266        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
 267        if (mask && mask->eth.type != 0xffff) {
 268            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
 269        }
 270        switch (ntohs(key->eth.type)) {
 271        case 0x0800:
 272        case 0x86dd:
 273            if (key->ip.proto || (mask && mask->ip.proto)) {
 274                b += sprintf(b, " ip proto %2d", key->ip.proto);
 275                if (mask && mask->ip.proto != 0xff) {
 276                    b += sprintf(b, "/0x%02x", mask->ip.proto);
 277                }
 278            }
 279            if (key->ip.tos || (mask && mask->ip.tos)) {
 280                b += sprintf(b, " ip tos %2d", key->ip.tos);
 281                if (mask && mask->ip.tos != 0xff) {
 282                    b += sprintf(b, "/0x%02x", mask->ip.tos);
 283                }
 284            }
 285            break;
 286        }
 287        switch (ntohs(key->eth.type)) {
 288        case 0x0800:
 289            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
 290                b += sprintf(b, " dst %s",
 291                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
 292                if (mask) {
 293                    b += sprintf(b, "/%d",
 294                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
 295                }
 296            }
 297            break;
 298        }
 299    }
 300
 301    DPRINTF("%s\n", buf);
 302}
 303#else
 304#define of_dpa_flow_key_dump(k, m)
 305#endif
 306
 307static void _of_dpa_flow_match(void *key, void *value, void *user_data)
 308{
 309    OfDpaFlow *flow = value;
 310    OfDpaFlowMatch *match = user_data;
 311    uint64_t *k = (uint64_t *)&flow->key;
 312    uint64_t *m = (uint64_t *)&flow->mask;
 313    uint64_t *v = (uint64_t *)&match->value;
 314    int i;
 315
 316    if (flow->key.tbl_id == match->value.tbl_id) {
 317        of_dpa_flow_key_dump(&flow->key, &flow->mask);
 318    }
 319
 320    if (flow->key.width > match->value.width) {
 321        return;
 322    }
 323
 324    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
 325        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
 326            return;
 327        }
 328    }
 329
 330    DPRINTF("match\n");
 331
 332    if (!match->best ||
 333        flow->priority > match->best->priority ||
 334        flow->lpm > match->best->lpm) {
 335        match->best = flow;
 336    }
 337}
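/*
 * The per-word test above, (~k & m & v) | (k & m & ~v), is non-zero exactly
 * when key and packet value differ in a bit selected by the mask, i.e. it is
 * equivalent to ((k ^ v) & m) != 0.  Among matching flows, a candidate
 * replaces the current best if it has a higher priority or a longer prefix
 * match (lpm).
 */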
 338
 339static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
 340{
 341    DPRINTF("\nnew search\n");
 342    of_dpa_flow_key_dump(&match->value, NULL);
 343
 344    g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);
 345
 346    return match->best;
 347}
 348
 349static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
 350{
 351    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
 352}
 353
 354static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
 355{
 356    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);
 357
 358    return ROCKER_OK;
 359}
 360
 361static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
 362{
 363    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
 364}
 365
 366static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
 367{
 368    OfDpaFlow *flow;
 369    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
 370
 371    flow = g_new0(OfDpaFlow, 1);
 372
 373    flow->cookie = cookie;
 374    flow->mask.tbl_id = 0xffffffff;
 375
 376    flow->stats.install_time = flow->stats.refresh_time = now;
 377
 378    return flow;
 379}
 380
 381static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
 382{
 383    OfDpaFlowPktFields *fields = &fc->fields;
 384
 385    fc->iov[0].iov_base = fields->ethhdr;
 386    fc->iov[0].iov_len = sizeof(struct eth_header);
 387    fc->iov[1].iov_base = fields->vlanhdr;
 388    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
 389}
 390
 391static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
 392                                  const struct iovec *iov, int iovcnt)
 393{
 394    OfDpaFlowPktFields *fields = &fc->fields;
 395    size_t sofar = 0;
 396    int i;
 397
 398    sofar += sizeof(struct eth_header);
 399    if (iov->iov_len < sofar) {
 400        DPRINTF("flow_pkt_parse underrun on eth_header\n");
 401        return;
 402    }
 403
 404    fields->ethhdr = iov->iov_base;
 405    fields->h_proto = &fields->ethhdr->h_proto;
 406
 407    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
 408        sofar += sizeof(struct vlan_header);
 409        if (iov->iov_len < sofar) {
 410            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
 411            return;
 412        }
 413        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
 414        fields->h_proto = &fields->vlanhdr->h_proto;
 415    }
 416
 417    switch (ntohs(*fields->h_proto)) {
 418    case ETH_P_IP:
 419        sofar += sizeof(struct ip_header);
 420        if (iov->iov_len < sofar) {
 421            DPRINTF("flow_pkt_parse underrun on ip_header\n");
 422            return;
 423        }
 424        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
 425        break;
 426    case ETH_P_IPV6:
 427        sofar += sizeof(struct ip6_header);
 428        if (iov->iov_len < sofar) {
 429            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
 430            return;
 431        }
 432        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
 433        break;
 434    }
 435
  436    /* To facilitate (potential) VLAN tag insertion, make a
  437     * copy of the iov and insert two new vectors at the
  438     * beginning for the eth hdr and vlan hdr.  No data is
  439     * copied, just the vectors.
  440     */
 441
 442    of_dpa_flow_pkt_hdr_reset(fc);
 443
 444    fc->iov[2].iov_base = fields->h_proto + 1;
 445    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;
 446
 447    for (i = 1; i < iovcnt; i++) {
 448        fc->iov[i+2] = iov[i];
 449    }
 450
 451    fc->iovcnt = iovcnt + 2;
 452}
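/*
 * After parsing, the packet is presented to the rest of the pipeline roughly
 * as:
 *
 *   iov[0]   Ethernet header (14 bytes)
 *   iov[1]   VLAN header (4 bytes, or length 0 if untagged)
 *   iov[2]   remainder of the first input vector (post-header payload)
 *   iov[3..] the remaining input vectors, copied by reference
 *
 * so a VLAN tag can later be inserted, rewritten or stripped purely by
 * adjusting iov[0] and iov[1], without touching packet data.
 */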
 453
 454static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
 455{
 456    OfDpaFlowPktFields *fields = &fc->fields;
 457    uint16_t h_proto = fields->ethhdr->h_proto;
 458
 459    if (fields->vlanhdr) {
 460        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
 461        return;
 462    }
 463
 464    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
 465    fields->vlanhdr = &fc->vlanhdr;
 466    fields->vlanhdr->h_tci = vlan_id;
 467    fields->vlanhdr->h_proto = h_proto;
 468    fields->h_proto = &fields->vlanhdr->h_proto;
 469
 470    fc->iov[1].iov_base = fields->vlanhdr;
 471    fc->iov[1].iov_len = sizeof(struct vlan_header);
 472}
 473
 474static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
 475{
 476    OfDpaFlowPktFields *fields = &fc->fields;
 477
 478    if (!fields->vlanhdr) {
 479        return;
 480    }
 481
 482    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
 483    fc->iov[1].iov_base = fields->h_proto;
 484    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
 485}
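/*
 * The strip above removes the 4-byte 802.1Q tag without copying: iov[0] is
 * shortened to end just before the outer ethertype (ETH_P_VLAN) and iov[1]
 * is repointed at the inner ethertype, so only dst MAC, src MAC and the
 * encapsulated ethertype/payload are emitted.
 */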
 486
 487static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
 488                                        uint8_t *src_mac, uint8_t *dst_mac,
 489                                        __be16 vlan_id)
 490{
 491    OfDpaFlowPktFields *fields = &fc->fields;
 492
 493    if (src_mac || dst_mac) {
 494        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
 495        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
 496            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
 497        }
 498        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
 499            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
 500        }
 501        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
 502    }
 503
 504    if (vlan_id && fields->vlanhdr) {
 505        fc->vlanhdr_rewrite = fc->vlanhdr;
 506        fc->vlanhdr_rewrite.h_tci = vlan_id;
 507        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
 508    }
 509}
 510
 511static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
 512
 513static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
 514                                       OfDpaFlowMatch *match)
 515{
 516    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
 517    match->value.in_pport = fc->in_pport;
 518    match->value.width = FLOW_KEY_WIDTH(tbl_id);
 519}
 520
 521static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
 522{
 523    uint32_t port;
 524
 525    /* The default on miss is for packets from physical ports
 526     * to go to the VLAN Flow Table. There is no default rule
 527     * for packets from logical ports, which are dropped on miss.
 528     */
 529
 530    if (fp_port_from_pport(fc->in_pport, &port)) {
 531        of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
 532    }
 533}
 534
 535static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
 536                                    OfDpaFlowMatch *match)
 537{
 538    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
 539    match->value.in_pport = fc->in_pport;
 540    if (fc->fields.vlanhdr) {
 541        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
 542    }
 543    match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
 544}
 545
 546static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
 547                               OfDpaFlow *flow)
 548{
 549    if (flow->action.apply.new_vlan_id) {
 550        of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
 551    }
 552}
 553
 554static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
 555                                        OfDpaFlowMatch *match)
 556{
 557    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
 558    match->value.in_pport = fc->in_pport;
 559    match->value.eth.type = *fc->fields.h_proto;
 560    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
 561    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
 562           sizeof(match->value.eth.dst.a));
 563    match->value.width = FLOW_KEY_WIDTH(eth.type);
 564}
 565
 566static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
 567{
 568    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
 569}
 570
 571static void of_dpa_apply_actions(OfDpaFlowContext *fc,
 572                                 OfDpaFlow *flow)
 573{
 574    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
 575    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
 576}
 577
 578static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
 579                                        OfDpaFlowMatch *match)
 580{
 581    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
 582    if (fc->fields.vlanhdr) {
 583        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
 584    } else if (fc->tunnel_id) {
 585        match->value.tunnel_id = fc->tunnel_id;
 586    }
 587    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
 588           sizeof(match->value.eth.dst.a));
 589    match->value.width = FLOW_KEY_WIDTH(eth.dst);
 590}
 591
 592static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
 593                                  OfDpaFlow *dst_flow)
 594{
 595    OfDpaFlowMatch match = { { 0, }, };
 596    OfDpaFlow *flow;
 597    uint8_t *addr;
 598    uint16_t vlan_id;
 599    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
 600    int64_t refresh_delay = 1;
 601
 602    /* Do a lookup in bridge table by src_mac/vlan */
 603
 604    addr = fc->fields.ethhdr->h_source;
 605    vlan_id = fc->fields.vlanhdr->h_tci;
 606
 607    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
 608    match.value.eth.vlan_id = vlan_id;
 609    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
 610    match.value.width = FLOW_KEY_WIDTH(eth.dst);
 611
 612    flow = of_dpa_flow_match(fc->of_dpa, &match);
 613    if (flow) {
 614        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
 615                    sizeof(flow->mask.eth.dst.a))) {
 616            /* src_mac/vlan already learned; if in_port and out_port
 617             * don't match, the end station has moved and the port
 618             * needs updating */
 619            /* XXX implement the in_port/out_port check */
 620            if (now - flow->stats.refresh_time < refresh_delay) {
 621                return;
 622            }
 623            flow->stats.refresh_time = now;
 624        }
 625    }
 626
 627    /* Let driver know about mac/vlan.  This may be a new mac/vlan
 628     * or a refresh of existing mac/vlan that's been hit after the
 629     * refresh_delay.
 630     */
 631
 632    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
 633                               fc->in_pport, addr, vlan_id);
 634}
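/*
 * Bridging hits and misses both pass through the learning path above: a hit
 * on an exact dst MAC/VLAN entry seen within refresh_delay (one second) is
 * suppressed, otherwise rocker_event_mac_vlan_seen() raises an event so the
 * driver can install or refresh the corresponding FDB entry.
 */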
 635
 636static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
 637{
 638    of_dpa_bridging_learn(fc, NULL);
 639    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
 640}
 641
 642static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
 643                                         OfDpaFlow *flow)
 644{
 645    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
 646        fc->action_set.write.group_id = flow->action.write.group_id;
 647    }
 648    fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
 649}
 650
 651static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
 652                                               OfDpaFlowMatch *match)
 653{
 654    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
 655    match->value.eth.type = *fc->fields.h_proto;
 656    if (fc->fields.ipv4hdr) {
 657        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
 658    }
 659    if (fc->fields.ipv6_dst_addr) {
 660        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
 661               sizeof(match->value.ipv6.addr.dst));
 662    }
 663    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
 664}
 665
 666static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
 667{
 668    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
 669}
 670
 671static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
 672                                                OfDpaFlow *flow)
 673{
 674    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
 675        fc->action_set.write.group_id = flow->action.write.group_id;
 676    }
 677}
 678
 679static void
 680of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
 681                                     OfDpaFlowMatch *match)
 682{
 683    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
 684    match->value.eth.type = *fc->fields.h_proto;
 685    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
 686    if (fc->fields.ipv4hdr) {
 687        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
 688        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
 689    }
 690    if (fc->fields.ipv6_src_addr) {
 691        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
 692               sizeof(match->value.ipv6.addr.src));
 693    }
 694    if (fc->fields.ipv6_dst_addr) {
 695        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
 696               sizeof(match->value.ipv6.addr.dst));
 697    }
 698    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
 699}
 700
 701static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
 702{
 703    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
 704}
 705
 706static void
 707of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
 708                                      OfDpaFlow *flow)
 709{
 710    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
 711        fc->action_set.write.group_id = flow->action.write.group_id;
 712    }
 713    fc->action_set.write.vlan_id = flow->action.write.vlan_id;
 714}
 715
 716static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
 717                                   OfDpaFlowMatch *match)
 718{
 719    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
 720    match->value.in_pport = fc->in_pport;
 721    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
 722           sizeof(match->value.eth.src.a));
 723    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
 724           sizeof(match->value.eth.dst.a));
 725    match->value.eth.type = *fc->fields.h_proto;
 726    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
 727    match->value.width = FLOW_KEY_WIDTH(eth.type);
 728    if (fc->fields.ipv4hdr) {
 729        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
 730        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
 731        match->value.width = FLOW_KEY_WIDTH(ip.tos);
 732    } else if (fc->fields.ipv6hdr) {
 733        match->value.ip.proto =
 734            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
 735        match->value.ip.tos = 0; /* XXX what goes here? */
 736        match->value.width = FLOW_KEY_WIDTH(ip.tos);
 737    }
 738}
 739
 740static void of_dpa_eg(OfDpaFlowContext *fc);
 741static void of_dpa_acl_hit(OfDpaFlowContext *fc,
 742                           OfDpaFlow *dst_flow)
 743{
 744    of_dpa_eg(fc);
 745}
 746
 747static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
 748                                    OfDpaFlow *flow)
 749{
 750    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
 751        fc->action_set.write.group_id = flow->action.write.group_id;
 752    }
 753}
 754
 755static void of_dpa_drop(OfDpaFlowContext *fc)
 756{
 757    /* drop packet */
 758}
 759
 760static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
 761                                              uint32_t group_id)
 762{
 763    return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
 764}
 765
 766static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
 767{
 768    g_hash_table_insert(of_dpa->group_tbl, &group->id, group);
 769
 770    return 0;
 771}
 772
 773#if 0
 774static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
 775{
 776    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);
 777
 778    if (!old_group) {
 779        return -ENOENT;
 780    }
 781
 782    /* XXX */
 783
 784    return 0;
 785}
 786#endif
 787
 788static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
 789{
 790    g_hash_table_remove(of_dpa->group_tbl, &group->id);
 791
 792    return 0;
 793}
 794
 795#if 0
 796static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
 797{
 798    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);
 799
 800    if (!group) {
 801        return -ENOENT;
 802    }
 803
 804    /* XXX get/return stats */
 805
 806    return 0;
 807}
 808#endif
 809
 810static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
 811{
 812    OfDpaGroup *group = g_new0(OfDpaGroup, 1);
 813
 814    group->id = id;
 815
 816    return group;
 817}
 818
 819static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
 820                                       OfDpaGroup *group)
 821{
 822    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;
 823
 824    if (group->l2_interface.pop_vlan) {
 825        of_dpa_flow_pkt_strip_vlan(fc);
 826    }
 827
 828    /* Note: By default, and as per the OpenFlow 1.3.1
 829     * specification, a packet cannot be forwarded back
  830     * to the IN_PORT from which it came. An action
 831     * bucket that specifies the particular packet's
 832     * egress port is not evaluated.
 833     */
 834
 835    if (group->l2_interface.out_pport == 0) {
 836        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
 837                   copy_to_cpu);
 838    } else if (group->l2_interface.out_pport != fc->in_pport) {
 839        rocker_port_eg(world_rocker(fc->of_dpa->world),
 840                       group->l2_interface.out_pport,
 841                       fc->iov, fc->iovcnt);
 842    }
 843}
 844
 845static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
 846                                     OfDpaGroup *group)
 847{
 848    OfDpaGroup *l2_group =
 849        of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);
 850
 851    if (!l2_group) {
 852        return;
 853    }
 854
 855    of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
 856                         group->l2_rewrite.dst_mac.a,
 857                         group->l2_rewrite.vlan_id);
 858    of_dpa_output_l2_interface(fc, l2_group);
 859}
 860
 861static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
 862                                   OfDpaGroup *group)
 863{
 864    OfDpaGroup *l2_group;
 865    int i;
 866
 867    for (i = 0; i < group->l2_flood.group_count; i++) {
 868        of_dpa_flow_pkt_hdr_reset(fc);
 869        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
 870        if (!l2_group) {
 871            continue;
 872        }
 873        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
 874        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
 875            of_dpa_output_l2_interface(fc, l2_group);
 876            break;
 877        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
 878            of_dpa_output_l2_rewrite(fc, l2_group);
 879            break;
 880        }
 881    }
 882}
 883
 884static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
 885{
 886    OfDpaGroup *l2_group =
 887        of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);
 888
 889    if (!l2_group) {
 890        return;
 891    }
 892
 893    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
 894                                group->l3_unicast.dst_mac.a,
 895                                group->l3_unicast.vlan_id);
 896    /* XXX need ttl_check */
 897    of_dpa_output_l2_interface(fc, l2_group);
 898}
 899
 900static void of_dpa_eg(OfDpaFlowContext *fc)
 901{
 902    OfDpaFlowAction *set = &fc->action_set;
 903    OfDpaGroup *group;
 904    uint32_t group_id;
 905
 906    /* send a copy of pkt to CPU (controller)? */
 907
 908    if (set->apply.copy_to_cpu) {
 909        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
 910        group = of_dpa_group_find(fc->of_dpa, group_id);
 911        if (group) {
 912            of_dpa_output_l2_interface(fc, group);
 913            of_dpa_flow_pkt_hdr_reset(fc);
 914        }
 915    }
 916
 917    /* process group write actions */
 918
 919    if (!set->write.group_id) {
 920        return;
 921    }
 922
 923    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
 924    if (!group) {
 925        return;
 926    }
 927
 928    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
 929    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
 930        of_dpa_output_l2_interface(fc, group);
 931        break;
 932    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
 933        of_dpa_output_l2_rewrite(fc, group);
 934        break;
 935    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
 936    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
 937        of_dpa_output_l2_flood(fc, group);
 938        break;
 939    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
 940        of_dpa_output_l3_unicast(fc, group);
 941        break;
 942    }
 943}
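/*
 * Egress: if an apply action requested copy_to_cpu, a copy is first handed
 * to the CPU rx path via the L2 interface group for pport 0, then the
 * accumulated write-action group, if any, selects the real output:
 * L2 interface, L2 rewrite, L2 flood/mcast or L3 unicast.
 */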
 944
 945typedef struct of_dpa_flow_tbl_ops {
 946    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match);
 947    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);
 948    void (*miss)(OfDpaFlowContext *fc);
 949    void (*hit_no_goto)(OfDpaFlowContext *fc);
 950    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);
 951    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);
 952} OfDpaFlowTblOps;
 953
 954static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
 955    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
 956        .build_match = of_dpa_ig_port_build_match,
 957        .miss = of_dpa_ig_port_miss,
 958        .hit_no_goto = of_dpa_drop,
 959    },
 960    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
 961        .build_match = of_dpa_vlan_build_match,
 962        .hit_no_goto = of_dpa_drop,
 963        .action_apply = of_dpa_vlan_insert,
 964    },
 965    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
 966        .build_match = of_dpa_term_mac_build_match,
 967        .miss = of_dpa_term_mac_miss,
 968        .hit_no_goto = of_dpa_drop,
 969        .action_apply = of_dpa_apply_actions,
 970    },
 971    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
 972        .build_match = of_dpa_bridging_build_match,
 973        .hit = of_dpa_bridging_learn,
 974        .miss = of_dpa_bridging_miss,
 975        .hit_no_goto = of_dpa_drop,
 976        .action_apply = of_dpa_apply_actions,
 977        .action_write = of_dpa_bridging_action_write,
 978    },
 979    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
 980        .build_match = of_dpa_unicast_routing_build_match,
 981        .miss = of_dpa_unicast_routing_miss,
 982        .hit_no_goto = of_dpa_drop,
 983        .action_write = of_dpa_unicast_routing_action_write,
 984    },
 985    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
 986        .build_match = of_dpa_multicast_routing_build_match,
 987        .miss = of_dpa_multicast_routing_miss,
 988        .hit_no_goto = of_dpa_drop,
 989        .action_write = of_dpa_multicast_routing_action_write,
 990    },
 991    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
 992        .build_match = of_dpa_acl_build_match,
 993        .hit = of_dpa_acl_hit,
 994        .miss = of_dpa_eg,
 995        .action_apply = of_dpa_apply_actions,
 996        .action_write = of_dpa_acl_action_write,
 997    },
 998};
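/*
 * of_dpa_tbl_ops[] wires the OF-DPA pipeline together: of_dpa_flow_ig_tbl()
 * looks up the best flow in the current table, runs the apply/write/hit
 * callbacks and then recurses into the flow's goto_tbl, so packets walk
 * Ingress Port -> VLAN -> Termination MAC -> Bridging/Routing -> ACL as the
 * installed flows dictate.  A hit without a goto_tbl falls through to
 * hit_no_goto (a drop for most tables); a miss invokes the table's miss
 * handler, if any.
 */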
 999
1000static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
1001{
1002    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
1003    OfDpaFlowMatch match = { { 0, }, };
1004    OfDpaFlow *flow;
1005
1006    if (ops->build_match) {
1007        ops->build_match(fc, &match);
1008    } else {
1009        return;
1010    }
1011
1012    flow = of_dpa_flow_match(fc->of_dpa, &match);
1013    if (!flow) {
1014        if (ops->miss) {
1015            ops->miss(fc);
1016        }
1017        return;
1018    }
1019
1020    flow->stats.hits++;
1021
1022    if (ops->action_apply) {
1023        ops->action_apply(fc, flow);
1024    }
1025
1026    if (ops->action_write) {
1027        ops->action_write(fc, flow);
1028    }
1029
1030    if (ops->hit) {
1031        ops->hit(fc, flow);
1032    }
1033
1034    if (flow->action.goto_tbl) {
1035        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
1036    } else if (ops->hit_no_goto) {
1037        ops->hit_no_goto(fc);
1038    }
1039
1040    /* drop packet */
1041}
1042
1043static ssize_t of_dpa_ig(World *world, uint32_t pport,
1044                         const struct iovec *iov, int iovcnt)
1045{
1046    struct iovec iov_copy[iovcnt + 2];
1047    OfDpaFlowContext fc = {
1048        .of_dpa = world_private(world),
1049        .in_pport = pport,
1050        .iov = iov_copy,
1051        .iovcnt = iovcnt + 2,
1052    };
1053
1054    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
1055    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);
1056
1057    return iov_size(iov, iovcnt);
1058}
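/*
 * of_dpa_ig() is the entry point for frames handed to the OF-DPA world.
 * iov_copy reserves two extra vectors up front so of_dpa_flow_pkt_parse()
 * can split out the Ethernet and (optional) VLAN headers without copying
 * packet data.
 */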
1059
1060#define ROCKER_TUNNEL_LPORT 0x00010000
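/*
 * pport numbers with the bit above set are logical (overlay tunnel) ports
 * rather than front-panel ports; of_dpa_cmd_add_ig_port() uses it to steer
 * tunnel traffic directly to the bridging table.  The of_dpa_cmd_add_*
 * helpers that follow validate the TLVs of a driver flow-add command and
 * fill in the flow's key, mask and actions for one specific table.
 */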
1061
1062static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1063{
1064    OfDpaFlowKey *key = &flow->key;
1065    OfDpaFlowKey *mask = &flow->mask;
1066    OfDpaFlowAction *action = &flow->action;
1067    bool overlay_tunnel;
1068
1069    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1070        !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1071        return -ROCKER_EINVAL;
1072    }
1073
1074    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
1075    key->width = FLOW_KEY_WIDTH(tbl_id);
1076
1077    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1078    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1079        mask->in_pport =
1080            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1081    }
1082
1083    overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);
1084
1085    action->goto_tbl =
1086        rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1087
1088    if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
1089        return -ROCKER_EINVAL;
1090    }
1091
1092    if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
1093        return -ROCKER_EINVAL;
1094    }
1095
1096    return ROCKER_OK;
1097}
1098
1099static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1100{
1101    OfDpaFlowKey *key = &flow->key;
1102    OfDpaFlowKey *mask = &flow->mask;
1103    OfDpaFlowAction *action = &flow->action;
1104    uint32_t port;
1105    bool untagged;
1106
1107    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1108        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1109        DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
1110        return -ROCKER_EINVAL;
1111    }
1112
1113    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
1114    key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1115
1116    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1117    if (!fp_port_from_pport(key->in_pport, &port)) {
1118        DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
1119        return -ROCKER_EINVAL;
1120    }
1121    mask->in_pport = 0xffffffff;
1122
1123    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1124
1125    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1126        mask->eth.vlan_id =
1127            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1128    }
1129
1130    if (key->eth.vlan_id) {
1131        untagged = false; /* filtering */
1132    } else {
1133        untagged = true;
1134    }
1135
1136    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1137        action->goto_tbl =
1138            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1139        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1140            DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
1141            return -ROCKER_EINVAL;
1142        }
1143    }
1144
1145    if (untagged) {
1146        if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
1147            DPRINTF("Must specify new vlan_id if untagged\n");
1148            return -ROCKER_EINVAL;
1149        }
1150        action->apply.new_vlan_id =
1151            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
1152        if (1 > ntohs(action->apply.new_vlan_id) ||
1153            ntohs(action->apply.new_vlan_id) > 4095) {
1154            DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
1155                    ntohs(action->apply.new_vlan_id));
1156            return -ROCKER_EINVAL;
1157        }
1158    }
1159
1160    return ROCKER_OK;
1161}
1162
1163static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1164{
1165    OfDpaFlowKey *key = &flow->key;
1166    OfDpaFlowKey *mask = &flow->mask;
1167    OfDpaFlowAction *action = &flow->action;
1168    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
1169    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
1170    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
1171    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
1172    uint32_t port;
1173    bool unicast = false;
1174    bool multicast = false;
1175
1176    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1177        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
1178        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1179        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
1180        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
1181        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
1182        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1183        return -ROCKER_EINVAL;
1184    }
1185
1186    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
1187    key->width = FLOW_KEY_WIDTH(eth.type);
1188
1189    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1190    if (!fp_port_from_pport(key->in_pport, &port)) {
1191        return -ROCKER_EINVAL;
1192    }
1193    mask->in_pport =
1194        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1195
1196    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1197    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
1198        return -ROCKER_EINVAL;
1199    }
1200    mask->eth.type = htons(0xffff);
1201
1202    memcpy(key->eth.dst.a,
1203           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1204           sizeof(key->eth.dst.a));
1205    memcpy(mask->eth.dst.a,
1206           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1207           sizeof(mask->eth.dst.a));
1208
1209    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
1210        unicast = true;
1211    }
1212
1213    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
1214    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
1215        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
1216        multicast = true;
1217    }
1218    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
1219        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
1220        multicast = true;
1221    }
1222
1223    if (!unicast && !multicast) {
1224        return -ROCKER_EINVAL;
1225    }
1226
1227    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1228    mask->eth.vlan_id =
1229        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1230
1231    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1232        action->goto_tbl =
1233            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1234
1235        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
1236            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
1237            return -ROCKER_EINVAL;
1238        }
1239
1240        if (unicast &&
1241            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
1242            return -ROCKER_EINVAL;
1243        }
1244
1245        if (multicast &&
1246            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
1247            return -ROCKER_EINVAL;
1248        }
1249    }
1250
1251    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1252        action->apply.copy_to_cpu =
1253            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1254    }
1255
1256    return ROCKER_OK;
1257}
1258
1259static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1260{
1261    OfDpaFlowKey *key = &flow->key;
1262    OfDpaFlowKey *mask = &flow->mask;
1263    OfDpaFlowAction *action = &flow->action;
1264    bool unicast = false;
1265    bool dst_mac = false;
1266    bool dst_mac_mask = false;
1267    enum {
1268        BRIDGING_MODE_UNKNOWN,
1269        BRIDGING_MODE_VLAN_UCAST,
1270        BRIDGING_MODE_VLAN_MCAST,
1271        BRIDGING_MODE_VLAN_DFLT,
1272        BRIDGING_MODE_TUNNEL_UCAST,
1273        BRIDGING_MODE_TUNNEL_MCAST,
1274        BRIDGING_MODE_TUNNEL_DFLT,
1275    } mode = BRIDGING_MODE_UNKNOWN;
1276
1277    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1278
1279    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1280        key->eth.vlan_id =
1281            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1282        mask->eth.vlan_id = 0xffff;
1283        key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1284    }
1285
1286    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1287        key->tunnel_id =
1288            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
1289        mask->tunnel_id = 0xffffffff;
1290        key->width = FLOW_KEY_WIDTH(tunnel_id);
1291    }
1292
1293    /* can't do VLAN bridging and tunnel bridging at same time */
1294    if (key->eth.vlan_id && key->tunnel_id) {
1295        DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
1296        return -ROCKER_EINVAL;
1297    }
1298
1299    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1300        memcpy(key->eth.dst.a,
1301               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1302               sizeof(key->eth.dst.a));
1303        key->width = FLOW_KEY_WIDTH(eth.dst);
1304        dst_mac = true;
1305        unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
1306    }
1307
1308    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1309        memcpy(mask->eth.dst.a,
1310               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1311               sizeof(mask->eth.dst.a));
1312        key->width = FLOW_KEY_WIDTH(eth.dst);
1313        dst_mac_mask = true;
1314    } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1315        memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
1316    }
1317
1318    if (key->eth.vlan_id) {
1319        if (dst_mac && !dst_mac_mask) {
1320            mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
1321                             BRIDGING_MODE_VLAN_MCAST;
1322        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1323            mode = BRIDGING_MODE_VLAN_DFLT;
1324        }
1325    } else if (key->tunnel_id) {
1326        if (dst_mac && !dst_mac_mask) {
1327            mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
1328                             BRIDGING_MODE_TUNNEL_MCAST;
1329        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1330            mode = BRIDGING_MODE_TUNNEL_DFLT;
1331        }
1332    }
1333
1334    if (mode == BRIDGING_MODE_UNKNOWN) {
1335        DPRINTF("Unknown bridging mode\n");
1336        return -ROCKER_EINVAL;
1337    }
1338
1339    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1340        action->goto_tbl =
1341            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1342        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
 1343            DPRINTF("Bridging goto tbl must be ACL policy\n");
1344            return -ROCKER_EINVAL;
1345        }
1346    }
1347
1348    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1349        action->write.group_id =
1350            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1351        switch (mode) {
1352        case BRIDGING_MODE_VLAN_UCAST:
1353            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1354                ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1355                DPRINTF("Bridging mode vlan ucast needs L2 "
1356                        "interface group (0x%08x)\n",
1357                        action->write.group_id);
1358                return -ROCKER_EINVAL;
1359            }
1360            break;
1361        case BRIDGING_MODE_VLAN_MCAST:
1362            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1363                ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
1364                DPRINTF("Bridging mode vlan mcast needs L2 "
1365                        "mcast group (0x%08x)\n",
1366                        action->write.group_id);
1367                return -ROCKER_EINVAL;
1368            }
1369            break;
1370        case BRIDGING_MODE_VLAN_DFLT:
1371            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1372                ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
1373                DPRINTF("Bridging mode vlan dflt needs L2 "
1374                        "flood group (0x%08x)\n",
1375                        action->write.group_id);
1376                return -ROCKER_EINVAL;
1377            }
1378            break;
1379        case BRIDGING_MODE_TUNNEL_MCAST:
1380            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1381                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1382                DPRINTF("Bridging mode tunnel mcast needs L2 "
1383                        "overlay group (0x%08x)\n",
1384                        action->write.group_id);
1385                return -ROCKER_EINVAL;
1386            }
1387            break;
1388        case BRIDGING_MODE_TUNNEL_DFLT:
1389            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1390                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1391                DPRINTF("Bridging mode tunnel dflt needs L2 "
1392                        "overlay group (0x%08x)\n",
1393                        action->write.group_id);
1394                return -ROCKER_EINVAL;
1395            }
1396            break;
1397        default:
1398            return -ROCKER_EINVAL;
1399        }
1400    }
1401
1402    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
1403        action->write.tun_log_lport =
1404            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
1405        if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
1406            DPRINTF("Have tunnel logical port but not "
1407                    "in bridging tunnel mode\n");
1408            return -ROCKER_EINVAL;
1409        }
1410    }
1411
1412    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1413        action->apply.copy_to_cpu =
1414            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1415    }
1416
1417    return ROCKER_OK;
1418}
1419
1420static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
1421                                          RockerTlv **flow_tlvs)
1422{
1423    OfDpaFlowKey *key = &flow->key;
1424    OfDpaFlowKey *mask = &flow->mask;
1425    OfDpaFlowAction *action = &flow->action;
1426    enum {
1427        UNICAST_ROUTING_MODE_UNKNOWN,
1428        UNICAST_ROUTING_MODE_IPV4,
1429        UNICAST_ROUTING_MODE_IPV6,
1430    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
1431    uint8_t type;
1432
1433    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1434        return -ROCKER_EINVAL;
1435    }
1436
1437    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
1438    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1439
1440    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1441    switch (ntohs(key->eth.type)) {
1442    case 0x0800:
1443        mode = UNICAST_ROUTING_MODE_IPV4;
1444        break;
1445    case 0x86dd:
1446        mode = UNICAST_ROUTING_MODE_IPV6;
1447        break;
1448    default:
1449        return -ROCKER_EINVAL;
1450    }
1451    mask->eth.type = htons(0xffff);
1452
1453    switch (mode) {
1454    case UNICAST_ROUTING_MODE_IPV4:
1455        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1456            return -ROCKER_EINVAL;
1457        }
1458        key->ipv4.addr.dst =
1459            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1460        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1461            return -ROCKER_EINVAL;
1462        }
1463        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
1464        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
1465            mask->ipv4.addr.dst =
1466                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
1467            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
1468        }
1469        break;
1470    case UNICAST_ROUTING_MODE_IPV6:
1471        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1472            return -ROCKER_EINVAL;
1473        }
1474        memcpy(&key->ipv6.addr.dst,
1475               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1476               sizeof(key->ipv6.addr.dst));
1477        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1478            return -ROCKER_EINVAL;
1479        }
1480        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
1481            memcpy(&mask->ipv6.addr.dst,
1482                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
1483                   sizeof(mask->ipv6.addr.dst));
1484        }
1485        break;
1486    default:
1487        return -ROCKER_EINVAL;
1488    }
1489
1490    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1491        action->goto_tbl =
1492            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1493        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1494            return -ROCKER_EINVAL;
1495        }
1496    }
1497
1498    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1499        action->write.group_id =
1500            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1501        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
1502        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
1503            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
1504            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
1505            return -ROCKER_EINVAL;
1506        }
1507    }
1508
1509    return ROCKER_OK;
1510}
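/*
 * For IPv4 routes, flow->lpm records the prefix length of the destination
 * mask (32 when no mask TLV is given), which lets of_dpa_flow_match() favor
 * longer prefixes when selecting the best flow.
 */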
1511
1512static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1513                                            RockerTlv **flow_tlvs)
1514{
1515    OfDpaFlowKey *key = &flow->key;
1516    OfDpaFlowKey *mask = &flow->mask;
1517    OfDpaFlowAction *action = &flow->action;
1518    enum {
1519        MULTICAST_ROUTING_MODE_UNKNOWN,
1520        MULTICAST_ROUTING_MODE_IPV4,
1521        MULTICAST_ROUTING_MODE_IPV6,
1522    } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1523
1524    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1525        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1526        return -ROCKER_EINVAL;
1527    }
1528
1529    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1530    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1531
1532    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1533    switch (ntohs(key->eth.type)) {
1534    case 0x0800:
1535        mode = MULTICAST_ROUTING_MODE_IPV4;
1536        break;
1537    case 0x86dd:
1538        mode = MULTICAST_ROUTING_MODE_IPV6;
1539        break;
1540    default:
1541        return -ROCKER_EINVAL;
1542    }
1543
1544    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1545
1546    switch (mode) {
1547    case MULTICAST_ROUTING_MODE_IPV4:
1548
1549        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1550            key->ipv4.addr.src =
1551                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1552        }
1553
1554        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1555            mask->ipv4.addr.src =
1556                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1557        }
1558
1559        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1560            if (mask->ipv4.addr.src != 0) {
1561                return -ROCKER_EINVAL;
1562            }
1563        }
1564
1565        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1566            return -ROCKER_EINVAL;
1567        }
1568
1569        key->ipv4.addr.dst =
1570            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1571        if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1572            return -ROCKER_EINVAL;
1573        }
1574
1575        break;
1576
1577    case MULTICAST_ROUTING_MODE_IPV6:
1578
1579        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1580            memcpy(&key->ipv6.addr.src,
1581                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1582                   sizeof(key->ipv6.addr.src));
1583        }
1584
1585        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1586            memcpy(&mask->ipv6.addr.src,
1587                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1588                   sizeof(mask->ipv6.addr.src));
1589        }
1590
1591        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
 1592            if (mask->ipv6.addr.src.addr32[0] != 0 ||
 1593                mask->ipv6.addr.src.addr32[1] != 0 ||
 1594                mask->ipv6.addr.src.addr32[2] != 0 ||
 1595                mask->ipv6.addr.src.addr32[3] != 0) {
1596                return -ROCKER_EINVAL;
1597            }
1598        }
1599
1600        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1601            return -ROCKER_EINVAL;
1602        }
1603
1604        memcpy(&key->ipv6.addr.dst,
1605               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1606               sizeof(key->ipv6.addr.dst));
1607        if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1608            return -ROCKER_EINVAL;
1609        }
1610
1611        break;
1612
1613    default:
1614        return -ROCKER_EINVAL;
1615    }
1616
1617    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1618        action->goto_tbl =
1619            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1620        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1621            return -ROCKER_EINVAL;
1622        }
1623    }
1624
1625    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1626        action->write.group_id =
1627            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1628        if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1629            ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1630            return -ROCKER_EINVAL;
1631        }
1632        action->write.vlan_id = key->eth.vlan_id;
1633    }
1634
1635    return ROCKER_OK;
1636}
1637
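/* Parse the optional IP protocol, DSCP and ECN matches shared by the IPv4
 * and IPv6 ACL modes; DSCP and ECN are combined into the single ip.tos
 * field of the flow key and mask.
 */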
1638static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1639                                 RockerTlv **flow_tlvs)
1640{
1641    key->width = FLOW_KEY_WIDTH(ip.tos);
1642
1643    key->ip.proto = 0;
1644    key->ip.tos = 0;
1645    mask->ip.proto = 0;
1646    mask->ip.tos = 0;
1647
1648    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1649        key->ip.proto =
1650            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1651    }
1652    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1653        mask->ip.proto =
1654            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1655    }
1656    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1657        key->ip.tos =
1658            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1659    }
1660    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1661        mask->ip.tos =
1662            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1663    }
1664    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1665        key->ip.tos |=
1666            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1667    }
1668    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1669        mask->ip.tos |=
1670            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1671    }
1672
1673    return ROCKER_OK;
1674}
1675
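/* Add or modify a flow in the ACL policy table.  IN_PPORT and ETHERTYPE
 * are required, and VLAN_ID and TUNNEL_ID are mutually exclusive.  The
 * EtherType and the presence of a VLAN select the match mode; only the
 * VLAN-based modes are supported, so the tenant (tunnel) modes are
 * rejected.
 */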
1676static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1677{
1678    OfDpaFlowKey *key = &flow->key;
1679    OfDpaFlowKey *mask = &flow->mask;
1680    OfDpaFlowAction *action = &flow->action;
1681    enum {
1682        ACL_MODE_UNKNOWN,
1683        ACL_MODE_IPV4_VLAN,
1684        ACL_MODE_IPV6_VLAN,
1685        ACL_MODE_IPV4_TENANT,
1686        ACL_MODE_IPV6_TENANT,
1687        ACL_MODE_NON_IP_VLAN,
1688        ACL_MODE_NON_IP_TENANT,
1689        ACL_MODE_ANY_VLAN,
1690        ACL_MODE_ANY_TENANT,
1691    } mode = ACL_MODE_UNKNOWN;
1692    int err = ROCKER_OK;
1693
1694    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1695        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1696        return -ROCKER_EINVAL;
1697    }
1698
1699    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
1700        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1701        return -ROCKER_EINVAL;
1702    }
1703
1704    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1705    key->width = FLOW_KEY_WIDTH(eth.type);
1706
1707    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1708    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1709        mask->in_pport =
1710            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1711    }
1712
1713    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
1714        memcpy(key->eth.src.a,
1715               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
1716               sizeof(key->eth.src.a));
1717    }
1718
1719    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
1720        memcpy(mask->eth.src.a,
1721               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
1722               sizeof(mask->eth.src.a));
1723    }
1724
1725    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1726        memcpy(key->eth.dst.a,
1727               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1728               sizeof(key->eth.dst.a));
1729    }
1730
1731    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1732        memcpy(mask->eth.dst.a,
1733               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1734               sizeof(mask->eth.dst.a));
1735    }
1736
1737    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1738    if (key->eth.type) {
1739        mask->eth.type = 0xffff;
1740    }
1741
1742    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1743        key->eth.vlan_id =
1744            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1745    }
1746
1747    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1748        mask->eth.vlan_id =
1749            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1750    }
1751
1752    switch (ntohs(key->eth.type)) {
1753    case 0x0000: /* any EtherType */
1754        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
1755        break;
1756    case 0x0800: /* IPv4 */
1757        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
1758        break;
1759    case 0x86dd: /* IPv6 */
1760        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
1761        break;
1762    default:
1763        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
1764                                    ACL_MODE_NON_IP_TENANT;
1765        break;
1766    }
1767
1768    /* XXX only supporting VLAN modes for now */
1769    if (mode != ACL_MODE_IPV4_VLAN &&
1770        mode != ACL_MODE_IPV6_VLAN &&
1771        mode != ACL_MODE_NON_IP_VLAN &&
1772        mode != ACL_MODE_ANY_VLAN) {
1773        return -ROCKER_EINVAL;
1774    }
1775
1776    switch (ntohs(key->eth.type)) {
1777    case 0x0800:
1778    case 0x86dd:
1779        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
1780        break;
1781    }
1782
1783    if (err) {
1784        return err;
1785    }
1786
1787    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1788        action->write.group_id =
1789            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1790    }
1791
1792    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1793        action->apply.copy_to_cpu =
1794            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1795    }
1796
1797    return ROCKER_OK;
1798}
1799
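/* Common add/modify path: TABLE_ID, PRIORITY and HARDTIME are mandatory.
 * IDLETIME is rejected for the ingress port, VLAN and termination MAC
 * tables, then the per-table parser fills in the flow key, mask and
 * actions.
 */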
1800static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
1801                                   RockerTlv **flow_tlvs)
1802{
1803    enum rocker_of_dpa_table_id tbl;
1804    int err = ROCKER_OK;
1805
1806    if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
1807        !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
1808        !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
1809        return -ROCKER_EINVAL;
1810    }
1811
1812    tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
1813    flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
1814    flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);
1815
1816    if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
1817        if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
1818            tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
1819            tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1820            return -ROCKER_EINVAL;
1821        }
1822        flow->idletime =
1823            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
1824    }
1825
1826    switch (tbl) {
1827    case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1828        err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
1829        break;
1830    case ROCKER_OF_DPA_TABLE_ID_VLAN:
1831        err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
1832        break;
1833    case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1834        err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
1835        break;
1836    case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1837        err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
1838        break;
1839    case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1840        err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
1841        break;
1842    case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
1843        err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
1844        break;
1845    case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1846        err = of_dpa_cmd_add_acl(flow, flow_tlvs);
1847        break;
1848    }
1849
1850    return err;
1851}
1852
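/* Flow add: the cookie must not already be in use.  If the per-table
 * parser rejects the TLVs, the partially built flow is freed instead of
 * being inserted into the flow table.
 */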
1853static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
1854                               RockerTlv **flow_tlvs)
1855{
1856    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1857    int err = ROCKER_OK;
1858
1859    if (flow) {
1860        return -ROCKER_EEXIST;
1861    }
1862
1863    flow = of_dpa_flow_alloc(cookie);
1864
1865    err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1866    if (err) {
1867        g_free(flow);
1868        return err;
1869    }
1870
1871    return of_dpa_flow_add(of_dpa, flow);
1872}
1873
1874static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
1875                               RockerTlv **flow_tlvs)
1876{
1877    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1878
1879    if (!flow) {
1880        return -ROCKER_ENOENT;
1881    }
1882
1883    return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1884}
1885
1886static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
1887{
1888    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1889
1890    if (!flow) {
1891        return -ROCKER_ENOENT;
1892    }
1893
1894    of_dpa_flow_del(of_dpa, flow);
1895
1896    return ROCKER_OK;
1897}
1898
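/* Return per-flow statistics (duration in seconds since install, rx and
 * tx packet counts) as TLVs written back into the descriptor buffer.
 */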
1899static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
1900                                     struct desc_info *info, char *buf)
1901{
1902    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1903    size_t tlv_size;
1904    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
1905    int pos;
1906
1907    if (!flow) {
1908        return -ROCKER_ENOENT;
1909    }
1910
1911    tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) +  /* duration */
1912               rocker_tlv_total_size(sizeof(uint64_t)) +  /* rx_pkts */
1913               rocker_tlv_total_size(sizeof(uint64_t));   /* tx_pkts */
1914
1915    if (tlv_size > desc_buf_size(info)) {
1916        return -ROCKER_EMSGSIZE;
1917    }
1918
1919    pos = 0;
1920    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
1921                        (int32_t)(now - flow->stats.install_time));
1922    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
1923                        flow->stats.rx_pkts);
1924    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
1925                        flow->stats.tx_pkts);
1926
1927    return desc_set_buf(info, tlv_size);
1928}
1929
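/* Dispatch one flow command.  Every flow command identifies its flow by a
 * 64-bit cookie chosen by the driver; the cookie is also the flow table
 * hash key.
 */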
1930static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
1931                           char *buf, uint16_t cmd,
1932                           RockerTlv **flow_tlvs)
1933{
1934    uint64_t cookie;
1935
1936    if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
1937        return -ROCKER_EINVAL;
1938    }
1939
1940    cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);
1941
1942    switch (cmd) {
1943    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
1944        return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
1945    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
1946        return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
1947    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
1948        return of_dpa_cmd_flow_del(of_dpa, cookie);
1949    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
1950        return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
1951    }
1952
1953    return -ROCKER_ENOTSUP;
1954}
1955
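/* L2 interface group: an egress physical port plus a flag controlling
 * whether the VLAN tag is popped on output.  Both attributes are
 * required.
 */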
1956static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1957                                       RockerTlv **group_tlvs)
1958{
1959    if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1960        !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1961        return -ROCKER_EINVAL;
1962    }
1963
1964    group->l2_interface.out_pport =
1965        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1966    group->l2_interface.pop_vlan =
1967        rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1968
1969    return ROCKER_OK;
1970}
1971
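/* L2 rewrite group: chains to an existing L2 interface group and can
 * rewrite the source MAC, destination MAC and VLAN.  A rewritten VLAN
 * must match the VLAN encoded in the referenced L2 interface group id.
 */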
1972static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
1973                                     RockerTlv **group_tlvs)
1974{
1975    OfDpaGroup *l2_interface_group;
1976
1977    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
1978        return -ROCKER_EINVAL;
1979    }
1980
1981    group->l2_rewrite.group_id =
1982        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
1983
1984    l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
1985    if (!l2_interface_group ||
1986        ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
1987                              ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1988        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
1989        return -ROCKER_EINVAL;
1990    }
1991
1992    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
1993        memcpy(group->l2_rewrite.src_mac.a,
1994               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
1995               sizeof(group->l2_rewrite.src_mac.a));
1996    }
1997
1998    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1999        memcpy(group->l2_rewrite.dst_mac.a,
2000               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2001               sizeof(group->l2_rewrite.dst_mac.a));
2002    }
2003
2004    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2005        group->l2_rewrite.vlan_id =
2006            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2007        if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
2008            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
2009            DPRINTF("Set VLAN ID must be the same as the L2 interface group\n");
2010            return -ROCKER_EINVAL;
2011        }
2012    }
2013
2014    return ROCKER_OK;
2015}
2016
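/* L2 flood (or multicast) group: a counted list of lower-level group ids.
 * Any referenced L2 interface group must carry the same VLAN as the flood
 * group itself.
 */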
2017static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
2018                                   RockerTlv **group_tlvs)
2019{
2020    OfDpaGroup *l2_group;
2021    RockerTlv **tlvs;
2022    int err;
2023    int i;
2024
2025    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
2026        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
2027        return -ROCKER_EINVAL;
2028    }
2029
2030    group->l2_flood.group_count =
2031        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);
2032
2033    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);
2034
2035    g_free(group->l2_flood.group_ids);
2036    group->l2_flood.group_ids =
2037        g_new0(uint32_t, group->l2_flood.group_count);
2038
2039    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
2040                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);
2041
2042    for (i = 0; i < group->l2_flood.group_count; i++) {
            /* reject a group count larger than the number of group ids supplied */
            if (!tlvs[i + 1]) {
                err = -ROCKER_EINVAL;
                goto err_out;
            }
2043        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
2044    }
2045
2046    /* All of the L2 interface groups referenced by the L2 flood
2047     * group must have the same VLAN
2048     */
2049
2050    for (i = 0; i < group->l2_flood.group_count; i++) {
2051        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
2052        if (!l2_group) {
2053            continue;
2054        }
2055        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
2056             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
2057            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
2058             ROCKER_GROUP_VLAN_GET(group->id))) {
2059            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
2060                    "flood group 0x%08x\n",
2061                    group->l2_flood.group_ids[i], group->id);
2062            err = -ROCKER_EINVAL;
2063            goto err_out;
2064        }
2065    }
2066
2067    g_free(tlvs);
2068    return ROCKER_OK;
2069
2070err_out:
2071    group->l2_flood.group_count = 0;
2072    g_free(group->l2_flood.group_ids);
        group->l2_flood.group_ids = NULL; /* avoid a dangling pointer on a later group mod */
2073    g_free(tlvs);
2074
2075    return err;
2076}
2077
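/* L3 unicast group: next-hop rewrite data (source/destination MAC, VLAN,
 * optional TTL check) plus the lower-level group used for output.
 */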
2078static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
2079{
2080    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
2081        return -ROCKER_EINVAL;
2082    }
2083
2084    group->l3_unicast.group_id =
2085        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
2086
2087    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2088        memcpy(group->l3_unicast.src_mac.a,
2089               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2090               sizeof(group->l3_unicast.src_mac.a));
2091    }
2092
2093    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2094        memcpy(group->l3_unicast.dst_mac.a,
2095               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2096               sizeof(group->l3_unicast.dst_mac.a));
2097    }
2098
2099    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2100        group->l3_unicast.vlan_id =
2101            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2102    }
2103
2104    if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
2105        group->l3_unicast.ttl_check =
2106            rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
2107    }
2108
2109    return ROCKER_OK;
2110}
2111
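/* Parse a group add/modify according to the group type packed into the
 * high bits of the group id; L2 multicast groups are handled by the L2
 * flood code.
 */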
2112static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
2113                               OfDpaGroup *group, RockerTlv **group_tlvs)
2114{
2115    uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);
2116
2117    switch (type) {
2118    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2119        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
2120    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2121        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
2122    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2123    /* Treat an L2 multicast group the same as an L2 flood group */
2124    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2125        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
2126    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2127        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
2128    }
2129
2130    return -ROCKER_ENOTSUP;
2131}
2132
2133static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
2134                                RockerTlv **group_tlvs)
2135{
2136    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2137    int err;
2138
2139    if (group) {
2140        return -ROCKER_EEXIST;
2141    }
2142
2143    group = of_dpa_group_alloc(group_id);
2144
2145    err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2146    if (err) {
2147        goto err_cmd_add;
2148    }
2149
2150    err = of_dpa_group_add(of_dpa, group);
2151    if (err) {
2152        goto err_cmd_add;
2153    }
2154
2155    return ROCKER_OK;
2156
2157err_cmd_add:
2158    g_free(group);
2159    return err;
2160}
2161
2162static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
2163                                RockerTlv **group_tlvs)
2164{
2165    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2166
2167    if (!group) {
2168        return -ROCKER_ENOENT;
2169    }
2170
2171    return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2172}
2173
2174static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
2175{
2176    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2177
2178    if (!group) {
2179        return -ROCKER_ENOENT;
2180    }
2181
2182    return of_dpa_group_del(of_dpa, group);
2183}
2184
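/* Group statistics are not supported. */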
2185static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
2186                                      struct desc_info *info, char *buf)
2187{
2188    return -ROCKER_ENOTSUP;
2189}
2190
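/* Dispatch one group command.  The 32-bit GROUP_ID attribute is mandatory
 * and is also the group table hash key.
 */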
2191static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
2192                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
2193{
2194    uint32_t group_id;
2195
2196    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
2197        return -ROCKER_EINVAL;
2198    }
2199
2200    group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
2201
2202    switch (cmd) {
2203    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2204        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
2205    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2206        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
2207    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2208        return of_dpa_cmd_group_del(of_dpa, group_id);
2209    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2210        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
2211    }
2212
2213    return -ROCKER_ENOTSUP;
2214}
2215
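/* Entry point for OF-DPA commands: parse the nested command-info TLVs
 * once, then hand flow commands and group commands to their dispatchers.
 */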
2216static int of_dpa_cmd(World *world, struct desc_info *info,
2217                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
2218{
2219    OfDpa *of_dpa = world_private(world);
2220    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];
2221
2222    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);
2223
2224    switch (cmd) {
2225    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
2226    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
2227    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
2228    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
2229        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
2230    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2231    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2232    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2233    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2234        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
2235    }
2236
2237    return -ROCKER_ENOTSUP;
2238}
2239
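/* GHashTable helpers: the flow table is keyed by the 64-bit flow cookie,
 * the group table by the 32-bit group id.
 */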
2240static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
2241{
2242    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
2243}
2244
2245static guint rocker_int64_hash(gconstpointer v)
2246{
2247    return (guint)*(const uint64_t *)v;
2248}
2249
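/* World init/uninit: create the flow and group hash tables.  Only a value
 * destructor (g_free) is registered, so removing an entry frees the flow
 * or group itself.
 */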
2250static int of_dpa_init(World *world)
2251{
2252    OfDpa *of_dpa = world_private(world);
2253
2254    of_dpa->world = world;
2255
2256    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
2257                                             rocker_int64_equal,
2258                                             NULL, g_free);
2259    if (!of_dpa->flow_tbl) {
2260        return -ENOMEM;
2261    }
2262
2263    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
2264                                              NULL, g_free);
2265    if (!of_dpa->group_tbl) {
2266        goto err_group_tbl;
2267    }
2268
2269    /* XXX hardcode some artificial table max values */
2270    of_dpa->flow_tbl_max_size = 100;
2271    of_dpa->group_tbl_max_size = 100;
2272
2273    return 0;
2274
2275err_group_tbl:
2276    g_hash_table_destroy(of_dpa->flow_tbl);
2277    return -ENOMEM;
2278}
2279
2280static void of_dpa_uninit(World *world)
2281{
2282    OfDpa *of_dpa = world_private(world);
2283
2284    g_hash_table_destroy(of_dpa->group_tbl);
2285    g_hash_table_destroy(of_dpa->flow_tbl);
2286}
2287
2288struct of_dpa_flow_fill_context {
2289    RockerOfDpaFlowList *list;
2290    uint32_t tbl_id;
2291};
2292
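/* g_hash_table_foreach() callback for the QMP flow query: convert one
 * internal flow into a RockerOfDpaFlow QAPI object and prepend it to the
 * result list.  Mask fields are only reported when they differ from the
 * trivial all-ones (exact) match.
 */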
2293static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
2294{
2295    struct of_dpa_flow *flow = value;
2296    struct of_dpa_flow_key *key = &flow->key;
2297    struct of_dpa_flow_key *mask = &flow->mask;
2298    struct of_dpa_flow_fill_context *flow_context = user_data;
2299    RockerOfDpaFlowList *new;
2300    RockerOfDpaFlow *nflow;
2301    RockerOfDpaFlowKey *nkey;
2302    RockerOfDpaFlowMask *nmask;
2303    RockerOfDpaFlowAction *naction;
2304
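    /* A tbl_id of (uint32_t)-1 disables the per-table filter so flows from
     * every table are reported.
     */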
2305    if (flow_context->tbl_id != -1 &&
2306        flow_context->tbl_id != key->tbl_id) {
2307        return;
2308    }
2309
2310    new = g_malloc0(sizeof(*new));
2311    nflow = new->value = g_malloc0(sizeof(*nflow));
2312    nkey = nflow->key = g_malloc0(sizeof(*nkey));
2313    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
2314    naction = nflow->action = g_malloc0(sizeof(*naction));
2315
2316    nflow->cookie = flow->cookie;
2317    nflow->hits = flow->stats.hits;
2318    nkey->priority = flow->priority;
2319    nkey->tbl_id = key->tbl_id;
2320
2321    if (key->in_pport || mask->in_pport) {
2322        nkey->has_in_pport = true;
2323        nkey->in_pport = key->in_pport;
2324    }
2325
2326    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
2327        nmask->has_in_pport = true;
2328        nmask->in_pport = mask->in_pport;
2329    }
2330
2331    if (key->eth.vlan_id || mask->eth.vlan_id) {
2332        nkey->has_vlan_id = true;
2333        nkey->vlan_id = ntohs(key->eth.vlan_id);
2334    }
2335
2336    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
2337        nmask->has_vlan_id = true;
2338        nmask->vlan_id = ntohs(mask->eth.vlan_id);
2339    }
2340
2341    if (key->tunnel_id || mask->tunnel_id) {
2342        nkey->has_tunnel_id = true;
2343        nkey->tunnel_id = key->tunnel_id;
2344    }
2345
2346    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
2347        nmask->has_tunnel_id = true;
2348        nmask->tunnel_id = mask->tunnel_id;
2349    }
2350
2351    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
2352        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
2353        nkey->has_eth_src = true;
2354        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
2355    }
2356
2357    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
2358        nmask->has_eth_src = true;
2359        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
2360    }
2361
2362    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
2363        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
2364        nkey->has_eth_dst = true;
2365        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
2366    }
2367
2368    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
2369        nmask->has_eth_dst = true;
2370        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
2371    }
2372
2373    if (key->eth.type) {
2374
2375        nkey->has_eth_type = true;
2376        nkey->eth_type = ntohs(key->eth.type);
2377
2378        switch (ntohs(key->eth.type)) {
2379        case 0x0800:
2380        case 0x86dd:
2381            if (key->ip.proto || mask->ip.proto) {
2382                nkey->has_ip_proto = true;
2383                nkey->ip_proto = key->ip.proto;
2384            }
2385            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
2386                nmask->has_ip_proto = true;
2387                nmask->ip_proto = mask->ip.proto;
2388            }
2389            if (key->ip.tos || mask->ip.tos) {
2390                nkey->has_ip_tos = true;
2391                nkey->ip_tos = key->ip.tos;
2392            }
2393            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
2394                nmask->has_ip_tos = true;
2395                nmask->ip_tos = mask->ip.tos;
2396            }
2397            break;
2398        }
2399
2400        switch (ntohs(key->eth.type)) {
2401        case 0x0800:
2402            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
2403                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
2404                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
2405                nkey->has_ip_dst = true;
2406                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
2407            }
2408            break;
2409        }
2410    }
2411
2412    if (flow->action.goto_tbl) {
2413        naction->has_goto_tbl = true;
2414        naction->goto_tbl = flow->action.goto_tbl;
2415    }
2416
2417    if (flow->action.write.group_id) {
2418        naction->has_group_id = true;
2419        naction->group_id = flow->action.write.group_id;
2420    }
2421
2422    if (flow->action.apply.new_vlan_id) {
2423        naction->has_new_vlan_id = true;
2424        naction->new_vlan_id = flow->action.apply.new_vlan_id;
2425    }
2426
2427    new->next = flow_context->list;
2428    flow_context->list = new;
2429}
2430
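/* QMP query of the OF-DPA flow table: look up the named rocker device,
 * check that it runs an OF-DPA world, then walk its flow hash table.
 */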
2431RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
2432                                                   bool has_tbl_id,
2433                                                   uint32_t tbl_id,
2434                                                   Error **errp)
2435{
2436    struct rocker *r;
2437    struct world *w;
2438    struct of_dpa *of_dpa;
2439    struct of_dpa_flow_fill_context fill_context = {
2440        .list = NULL,
2441        .tbl_id = tbl_id,
2442    };
2443
2444    r = rocker_find(name);
2445    if (!r) {
2446        error_setg(errp, "rocker %s not found", name);
2447        return NULL;
2448    }
2449
2450    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2451    if (!w) {
2452        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2453        return NULL;
2454    }
2455
2456    of_dpa = world_private(w);
2457
2458    g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
2459
2460    return fill_context.list;
2461}
2462
2463struct of_dpa_group_fill_context {
2464    RockerOfDpaGroupList *list;
2465    uint8_t type;
2466};
2467
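/* g_hash_table_foreach() callback for the QMP group query: convert one
 * internal group into a RockerOfDpaGroup QAPI object, decoding the
 * type-specific fields (VLAN, pport, index) that are packed into the
 * group id.
 */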
2468static void of_dpa_group_fill(void *key, void *value, void *user_data)
2469{
2470    struct of_dpa_group *group = value;
2471    struct of_dpa_group_fill_context *flow_context = user_data;
2472    RockerOfDpaGroupList *new;
2473    RockerOfDpaGroup *ngroup;
2474    struct uint32List *id;
2475    int i;
2476
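    /* A type of 9 is outside the defined OF-DPA group types and appears to
     * act as a wildcard that matches every group type.
     */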
2477    if (flow_context->type != 9 &&
2478        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
2479        return;
2480    }
2481
2482    new = g_malloc0(sizeof(*new));
2483    ngroup = new->value = g_malloc0(sizeof(*ngroup));
2484
2485    ngroup->id = group->id;
2486
2487    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);
2488
2489    switch (ngroup->type) {
2490    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2491        ngroup->has_vlan_id = true;
2492        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
2493        ngroup->has_pport = true;
2494        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
2495        ngroup->has_out_pport = true;
2496        ngroup->out_pport = group->l2_interface.out_pport;
2497        ngroup->has_pop_vlan = true;
2498        ngroup->pop_vlan = group->l2_interface.pop_vlan;
2499        break;
2500    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2501        ngroup->has_index = true;
2502        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
2503        ngroup->has_group_id = true;
2504        ngroup->group_id = group->l2_rewrite.group_id;
2505        if (group->l2_rewrite.vlan_id) {
2506            ngroup->has_set_vlan_id = true;
2507            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
2508        }
2509        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
2510            ngroup->has_set_eth_src = true;
2511            ngroup->set_eth_src =
2512                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
2513        }
2514        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
2515            ngroup->has_set_eth_dst = true;
2516            ngroup->set_eth_dst =
2517                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
2518        }
2519        break;
2520    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2521    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2522        ngroup->has_vlan_id = true;
2523        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
2524        ngroup->has_index = true;
2525        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
2526        for (i = 0; i < group->l2_flood.group_count; i++) {
2527            ngroup->has_group_ids = true;
2528            id = g_malloc0(sizeof(*id));
2529            id->value = group->l2_flood.group_ids[i];
2530            id->next = ngroup->group_ids;
2531            ngroup->group_ids = id;
2532        }
2533        break;
2534    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2535        ngroup->has_index = true;
2536        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
2537        ngroup->has_group_id = true;
2538        ngroup->group_id = group->l3_unicast.group_id;
2539        if (group->l3_unicast.vlan_id) {
2540            ngroup->has_set_vlan_id = true;
2541            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
2542        }
2543        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
2544            ngroup->has_set_eth_src = true;
2545            ngroup->set_eth_src =
2546                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
2547        }
2548        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
2549            ngroup->has_set_eth_dst = true;
2550            ngroup->set_eth_dst =
2551                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
2552        }
2553        if (group->l3_unicast.ttl_check) {
2554            ngroup->has_ttl_check = true;
2555            ngroup->ttl_check = group->l3_unicast.ttl_check;
2556        }
2557        break;
2558    }
2559
2560    new->next = flow_context->list;
2561    flow_context->list = new;
2562}
2563
2564RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
2565                                                     bool has_type,
2566                                                     uint8_t type,
2567                                                     Error **errp)
2568{
2569    struct rocker *r;
2570    struct world *w;
2571    struct of_dpa *of_dpa;
2572    struct of_dpa_group_fill_context fill_context = {
2573        .list = NULL,
2574        .type = type,
2575    };
2576
2577    r = rocker_find(name);
2578    if (!r) {
2579        error_setg(errp, "rocker %s not found", name);
2580        return NULL;
2581    }
2582
2583    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2584    if (!w) {
2585        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2586        return NULL;
2587    }
2588
2589    of_dpa = world_private(w);
2590
2591    g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
2592
2593    return fill_context.list;
2594}
2595
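/* World registration: "ofdpa" handles ingress packets via of_dpa_ig() and
 * descriptor commands via of_dpa_cmd().
 */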
2596static WorldOps of_dpa_ops = {
2597    .name = "ofdpa",
2598    .init = of_dpa_init,
2599    .uninit = of_dpa_uninit,
2600    .ig = of_dpa_ig,
2601    .cmd = of_dpa_cmd,
2602};
2603
2604World *of_dpa_world_alloc(Rocker *r)
2605{
2606    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
2607}
2608