qemu/hw/net/rocker/rocker_of_dpa.c
/*
 * QEMU rocker switch emulation - OF-DPA flow processing support
 *
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "qemu/osdep.h"
#include "net/eth.h"
#include "qemu/iov.h"
#include "qemu/timer.h"
#include "qmp-commands.h"

#include "rocker.h"
#include "rocker_hw.h"
#include "rocker_fp.h"
#include "rocker_tlv.h"
#include "rocker_world.h"
#include "rocker_desc.h"
#include "rocker_of_dpa.h"

static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac =   { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };

typedef struct of_dpa {
    World *world;
    GHashTable *flow_tbl;
    GHashTable *group_tbl;
    unsigned int flow_tbl_max_size;
    unsigned int group_tbl_max_size;
} OfDpa;

/* flow_key stolen mostly from OVS
 *
 * Note: fields that compare with network packet header fields
 * are stored in network order (BE) to avoid per-packet field
 * byte-swaps.
 */

typedef struct of_dpa_flow_key {
    uint32_t in_pport;               /* ingress port */
    uint32_t tunnel_id;              /* overlay tunnel id */
    uint32_t tbl_id;                 /* table id */
    struct {
        __be16 vlan_id;              /* 0 if no VLAN */
        MACAddr src;                 /* ethernet source address */
        MACAddr dst;                 /* ethernet destination address */
        __be16 type;                 /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;               /* IP protocol or ARP opcode */
        uint8_t tos;                 /* IP ToS */
        uint8_t ttl;                 /* IP TTL/hop limit */
        uint8_t frag;                /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;          /* IP source address */
                __be32 dst;          /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;      /* TCP/UDP/SCTP source port */
                    __be16 dst;      /* TCP/UDP/SCTP destination port */
                    __be16 flags;    /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;     /* ARP source hardware address */
                    MACAddr tha;     /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;       /* IPv6 source address */
                Ipv6Addr dst;       /* IPv6 destination address */
            } addr;
            __be32 label;            /* IPv6 flow label */
            struct {
                __be16 src;          /* TCP/UDP/SCTP source port */
                __be16 dst;          /* TCP/UDP/SCTP destination port */
                __be16 flags;        /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target;    /* ND target address */
                MACAddr sll;         /* ND source link layer address */
                MACAddr tll;         /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                       /* how many uint64_t's in key? */
} OfDpaFlowKey;

/* Width of key which includes field 'f' in u64s, rounded up */
#define FLOW_KEY_WIDTH(f) \
    DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof(((OfDpaFlowKey *)0)->f), \
    sizeof(uint64_t))
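
/* For example, FLOW_KEY_WIDTH(eth.vlan_id) spans in_pport, tunnel_id,
 * tbl_id and eth.vlan_id, rounded up to whole uint64_t words, so the
 * matcher can compare masked keys 64 bits at a time.
 */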

typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;
    struct {
        uint32_t group_id;
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;
        uint32_t out_pport;
        uint8_t copy_to_cpu;
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;

typedef struct of_dpa_flow {
    uint32_t lpm;
    uint32_t priority;
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;
    OfDpaFlowKey key;
    OfDpaFlowKey mask;
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;
        int64_t refresh_time;
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;

typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;
    __be16 *h_proto;
    struct vlan_header *vlanhdr;
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;

typedef struct of_dpa_flow_context {
    uint32_t in_pport;
    uint32_t tunnel_id;
    struct iovec *iov;
    int iovcnt;
    struct eth_header ethhdr_rewrite;
    struct vlan_header vlanhdr_rewrite;
    struct vlan_header vlanhdr;
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;
} OfDpaFlowContext;

typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;

typedef struct of_dpa_group {
    uint32_t id;
    union {
        struct {
            uint32_t out_pport;
            uint8_t pop_vlan;
        } l2_interface;
        struct {
            uint32_t group_id;
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;
        } l2_flood;
        struct {
            uint32_t group_id;
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;
        } l3_unicast;
    };
} OfDpaGroup;

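/* Convert a contiguous network-order netmask into a prefix length,
 * e.g. htonl(0xffffff00) -> 24.  Used for a flow's LPM value and for
 * dumping masks as /prefix.
 */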
static int of_dpa_mask2prefix(__be32 mask)
{
    int i;
    int count = 32;

    for (i = 0; i < 32; i++) {
        if (!(ntohl(mask) & ((2 << i) - 1))) {
            count--;
        }
    }

    return count;
}

#if defined(DEBUG_ROCKER)
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif

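/* GHashTable foreach callback: compare one installed flow against the
 * packet key in match->value, one uint64_t at a time.  The expression
 * (~k & m & v) | (k & m & ~v) is non-zero iff (k & m) != (v & m), i.e.
 * the flow and the packet disagree in a bit covered by the flow's mask.
 * A matching flow replaces the current best if it has higher priority
 * or a longer LPM prefix.
 */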
static void _of_dpa_flow_match(void *key, void *value, void *user_data)
{
    OfDpaFlow *flow = value;
    OfDpaFlowMatch *match = user_data;
    uint64_t *k = (uint64_t *)&flow->key;
    uint64_t *m = (uint64_t *)&flow->mask;
    uint64_t *v = (uint64_t *)&match->value;
    int i;

    if (flow->key.tbl_id == match->value.tbl_id) {
        of_dpa_flow_key_dump(&flow->key, &flow->mask);
    }

    if (flow->key.width > match->value.width) {
        return;
    }

    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
            return;
        }
    }

    DPRINTF("match\n");

    if (!match->best ||
        flow->priority > match->best->priority ||
        flow->lpm > match->best->lpm) {
        match->best = flow;
    }
}

static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
{
    DPRINTF("\nnew search\n");
    of_dpa_flow_key_dump(&match->value, NULL);

    g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);

    return match->best;
}

static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
{
    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
}

static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);

    return ROCKER_OK;
}

static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}

static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
{
    OfDpaFlow *flow;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;

    flow = g_new0(OfDpaFlow, 1);
    if (!flow) {
        return NULL;
    }

    flow->cookie = cookie;
    flow->mask.tbl_id = 0xffffffff;

    flow->stats.install_time = flow->stats.refresh_time = now;

    return flow;
}

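/* The flow context carries the packet as an iovec array with a fixed
 * layout: iov[0] is the ethernet header, iov[1] the (optional) VLAN
 * header and iov[2..] the rest of the frame.  The header helpers below
 * only ever touch iov[0] and iov[1], so VLAN tags can be inserted,
 * stripped or rewritten without copying the payload.
 */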
static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}

static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr.  No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i+2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}

static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    uint16_t h_proto = fields->ethhdr->h_proto;

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}

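/* Strip the VLAN tag without moving packet data: shrink iov[0] so it
 * ends after the source MAC and point iov[1] at the inner h_proto that
 * follows the TCI, which leaves the 4 tag bytes out of the output.
 */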
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}

static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}

static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);

static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
                                       OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    match->value.in_pport = fc->in_pport;
    match->value.width = FLOW_KEY_WIDTH(tbl_id);
}

static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
{
    uint32_t port;

    /* The default on miss is for packets from physical ports
     * to go to the VLAN Flow Table. There is no default rule
     * for packets from logical ports, which are dropped on miss.
     */

    if (fp_port_from_pport(fc->in_pport, &port)) {
        of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
    }
}

static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
                                    OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    match->value.in_pport = fc->in_pport;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    }
    match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
}

static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
                               OfDpaFlow *flow)
{
    if (flow->action.apply.new_vlan_id) {
        of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
    }
}

static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    match->value.in_pport = fc->in_pport;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.type);
}

static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}

static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}

static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    } else if (fc->tunnel_id) {
        match->value.tunnel_id = fc->tunnel_id;
    }
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.dst);
}

static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int64_t refresh_delay = 1;

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan.  This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}

static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}

static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
                                         OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
}

static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
                                               OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}

static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}

static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
                                                OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}

static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
                                     OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_src_addr) {
        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
               sizeof(match->value.ipv6.addr.src));
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}

static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}

static void
of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
                                      OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.vlan_id = flow->action.write.vlan_id;
}

static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    match->value.in_pport = fc->in_pport;
    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
           sizeof(match->value.eth.src.a));
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    match->value.width = FLOW_KEY_WIDTH(eth.type);
    if (fc->fields.ipv4hdr) {
        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        match->value.ip.proto =
            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        match->value.ip.tos = 0; /* XXX what goes here? */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    }
}

static void of_dpa_eg(OfDpaFlowContext *fc);
static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}

static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
                                    OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}

static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}

static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
                                     uint32_t group_id)
{
    return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
}

static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_insert(of_dpa->group_tbl, &group->id, group);

    return 0;
}

#if 0
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif

static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_remove(of_dpa->group_tbl, &group->id);

    return 0;
}

#if 0
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif

static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
{
    OfDpaGroup *group = g_new0(OfDpaGroup, 1);

    if (!group) {
        return NULL;
    }

    group->id = id;

    return group;
}

static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;

    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in. An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (group->l2_interface.out_pport == 0) {
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
                   copy_to_cpu);
    } else if (group->l2_interface.out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world),
                       group->l2_interface.out_pport,
                       fc->iov, fc->iovcnt);
    }
}

static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
                                     OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
                                group->l2_rewrite.dst_mac.a,
                                group->l2_rewrite.vlan_id);
    of_dpa_output_l2_interface(fc, l2_group);
}

static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    OfDpaGroup *l2_group;
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        of_dpa_flow_pkt_hdr_reset(fc);
        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, l2_group);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, l2_group);
            break;
        }
    }
}

static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
                                group->l3_unicast.dst_mac.a,
                                group->l3_unicast.vlan_id);
    /* XXX need ttl_check */
    of_dpa_output_l2_interface(fc, l2_group);
}

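/* Egress processing: optionally deliver a copy to the CPU first (an L2
 * interface group with out_pport 0 ends up in rx_produce()), then
 * dispatch on the type of the group named by the accumulated write
 * actions.
 */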
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}

typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match);
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*miss)(OfDpaFlowContext *fc);
    void (*hit_no_goto)(OfDpaFlowContext *fc);
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);
} OfDpaFlowTblOps;

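/* Per-table callbacks for the OF-DPA pipeline: build_match constructs
 * the lookup key, hit/miss handle the lookup result, action_apply and
 * action_write fold the entry's actions into the context's action set,
 * and hit_no_goto runs when a matching entry has no goto_tbl (set to
 * of_dpa_drop for every table except ACL_POLICY, whose hit handler
 * already egresses the packet).
 */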
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};

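/* Run one table of the pipeline: look up the packet, apply the matching
 * entry's actions and recurse into its goto_tbl (or run the table's
 * miss/hit_no_goto handler), so a packet walks the tables the installed
 * flows chain together, starting from INGRESS_PORT.
 */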
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
{
    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;

    if (ops->build_match) {
        ops->build_match(fc, &match);
    } else {
        return;
    }

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (!flow) {
        if (ops->miss) {
            ops->miss(fc);
        }
        return;
    }

    flow->stats.hits++;

    if (ops->action_apply) {
        ops->action_apply(fc, flow);
    }

    if (ops->action_write) {
        ops->action_write(fc, flow);
    }

    if (ops->hit) {
        ops->hit(fc, flow);
    }

    if (flow->action.goto_tbl) {
        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
    } else if (ops->hit_no_goto) {
        ops->hit_no_goto(fc);
    }

    /* drop packet */
}

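/* World ingress entry point: wrap the incoming iovec in a flow context
 * with two extra slots reserved for the ethernet and VLAN headers, then
 * start the pipeline at the INGRESS_PORT table.
 */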
static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    struct iovec iov_copy[iovcnt + 2];
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}

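/* pports with this bit set are logical (overlay tunnel) ports rather
 * than front-panel ports; their ingress-port entries may only go
 * straight to the bridging table.
 */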
#define ROCKER_TUNNEL_LPORT 0x00010000

static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool overlay_tunnel;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    key->width = FLOW_KEY_WIDTH(tbl_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);

    action->goto_tbl =
        rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

    if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
        return -ROCKER_EINVAL;
    }

    if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
        return -ROCKER_EINVAL;
    }

    return ROCKER_OK;
}

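/* Install a VLAN table entry.  A zero vlan_id in the key selects the
 * untagged case and must supply a new_vlan_id (1..4095) to assign;
 * a non-zero vlan_id acts as a filtering entry for tagged frames.
 */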
static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    uint32_t port;
    bool untagged;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    key->width = FLOW_KEY_WIDTH(eth.vlan_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
        return -ROCKER_EINVAL;
    }
    mask->in_pport = 0xffffffff;

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    if (key->eth.vlan_id) {
        untagged = false; /* filtering */
    } else {
        untagged = true;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
            return -ROCKER_EINVAL;
        }
    }

    if (untagged) {
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
            DPRINTF("Must specify new vlan_id if untagged\n");
            return -ROCKER_EINVAL;
        }
        action->apply.new_vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
        if (1 > ntohs(action->apply.new_vlan_id) ||
            ntohs(action->apply.new_vlan_id) > 4095) {
            DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
                    ntohs(action->apply.new_vlan_id));
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}

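/* Install a bridging table entry.  The mode is derived from what the
 * request supplies: VLAN vs. tunnel bridging from vlan_id vs. tunnel_id,
 * and unicast/multicast/default from whether an exact dst MAC, a masked
 * dst MAC, or no dst MAC is given.  Each mode then accepts only the
 * matching group type for its write action.
 */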
static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool unicast = false;
    bool dst_mac = false;
    bool dst_mac_mask = false;
    enum {
        BRIDGING_MODE_UNKNOWN,
        BRIDGING_MODE_VLAN_UCAST,
        BRIDGING_MODE_VLAN_MCAST,
        BRIDGING_MODE_VLAN_DFLT,
        BRIDGING_MODE_TUNNEL_UCAST,
        BRIDGING_MODE_TUNNEL_MCAST,
        BRIDGING_MODE_TUNNEL_DFLT,
    } mode = BRIDGING_MODE_UNKNOWN;

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        mask->eth.vlan_id = 0xffff;
        key->width = FLOW_KEY_WIDTH(eth.vlan_id);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        key->tunnel_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
        mask->tunnel_id = 0xffffffff;
        key->width = FLOW_KEY_WIDTH(tunnel_id);
    }

    /* can't do VLAN bridging and tunnel bridging at same time */
    if (key->eth.vlan_id && key->tunnel_id) {
        DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac = true;
        unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac_mask = true;
    } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
    }

    if (key->eth.vlan_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
                             BRIDGING_MODE_VLAN_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_VLAN_DFLT;
        }
    } else if (key->tunnel_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
                             BRIDGING_MODE_TUNNEL_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_TUNNEL_DFLT;
        }
    }

    if (mode == BRIDGING_MODE_UNKNOWN) {
        DPRINTF("Unknown bridging mode\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            DPRINTF("Bridging goto tbl must be ACL policy\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        switch (mode) {
        case BRIDGING_MODE_VLAN_UCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
                DPRINTF("Bridging mode vlan ucast needs L2 "
                        "interface group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
                DPRINTF("Bridging mode vlan mcast needs L2 "
                        "mcast group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
                DPRINTF("Bridging mode vlan dflt needs L2 "
                        "flood group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel mcast needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel dflt needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        default:
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
        action->write.tun_log_lport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
        if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
            DPRINTF("Have tunnel logical port but not "
                    "in bridging tunnel mode\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}

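/* Install a unicast routing entry for IPv4 or IPv6.  For IPv4 the
 * destination mask is converted to a prefix length in flow->lpm, which
 * the matcher uses to prefer longer prefixes among equally prioritized
 * entries.
 */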
1426static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
1427                                          RockerTlv **flow_tlvs)
1428{
1429    OfDpaFlowKey *key = &flow->key;
1430    OfDpaFlowKey *mask = &flow->mask;
1431    OfDpaFlowAction *action = &flow->action;
1432    enum {
1433        UNICAST_ROUTING_MODE_UNKNOWN,
1434        UNICAST_ROUTING_MODE_IPV4,
1435        UNICAST_ROUTING_MODE_IPV6,
1436    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
1437    uint8_t type;
1438
1439    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1440        return -ROCKER_EINVAL;
1441    }
1442
1443    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
1444    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1445
1446    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1447    switch (ntohs(key->eth.type)) {
1448    case 0x0800:
1449        mode = UNICAST_ROUTING_MODE_IPV4;
1450        break;
1451    case 0x86dd:
1452        mode = UNICAST_ROUTING_MODE_IPV6;
1453        break;
1454    default:
1455        return -ROCKER_EINVAL;
1456    }
1457    mask->eth.type = htons(0xffff);
1458
1459    switch (mode) {
1460    case UNICAST_ROUTING_MODE_IPV4:
1461        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1462            return -ROCKER_EINVAL;
1463        }
1464        key->ipv4.addr.dst =
1465            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1466        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1467            return -ROCKER_EINVAL;
1468        }
1469        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
1470        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
1471            mask->ipv4.addr.dst =
1472                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
1473            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
1474        }
1475        break;
1476    case UNICAST_ROUTING_MODE_IPV6:
1477        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1478            return -ROCKER_EINVAL;
1479        }
1480        memcpy(&key->ipv6.addr.dst,
1481               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1482               sizeof(key->ipv6.addr.dst));
1483        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1484            return -ROCKER_EINVAL;
1485        }
1486        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
1487            memcpy(&mask->ipv6.addr.dst,
1488                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
1489                   sizeof(mask->ipv6.addr.dst));
1490        }
1491        break;
1492    default:
1493        return -ROCKER_EINVAL;
1494    }
1495
1496    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1497        action->goto_tbl =
1498            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1499        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1500            return -ROCKER_EINVAL;
1501        }
1502    }
1503
1504    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1505        action->write.group_id =
1506            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1507        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
1508        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
1509            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
1510            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
1511            return -ROCKER_EINVAL;
1512        }
1513    }
1514
1515    return ROCKER_OK;
1516}
1517
1518static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1519                                            RockerTlv **flow_tlvs)
1520{
1521    OfDpaFlowKey *key = &flow->key;
1522    OfDpaFlowKey *mask = &flow->mask;
1523    OfDpaFlowAction *action = &flow->action;
1524    enum {
1525        MULTICAST_ROUTING_MODE_UNKNOWN,
1526        MULTICAST_ROUTING_MODE_IPV4,
1527        MULTICAST_ROUTING_MODE_IPV6,
1528    } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1529
1530    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1531        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1532        return -ROCKER_EINVAL;
1533    }
1534
1535    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1536    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1537
1538    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1539    switch (ntohs(key->eth.type)) {
1540    case 0x0800:
1541        mode = MULTICAST_ROUTING_MODE_IPV4;
1542        break;
1543    case 0x86dd:
1544        mode = MULTICAST_ROUTING_MODE_IPV6;
1545        break;
1546    default:
1547        return -ROCKER_EINVAL;
1548    }
1549
1550    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1551
1552    switch (mode) {
1553    case MULTICAST_ROUTING_MODE_IPV4:
1554
1555        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1556            key->ipv4.addr.src =
1557                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1558        }
1559
1560        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1561            mask->ipv4.addr.src =
1562                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1563        }
1564
1565        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1566            if (mask->ipv4.addr.src != 0) {
1567                return -ROCKER_EINVAL;
1568            }
1569        }
1570
1571        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1572            return -ROCKER_EINVAL;
1573        }
1574
1575        key->ipv4.addr.dst =
1576            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1577        if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1578            return -ROCKER_EINVAL;
1579        }
1580
1581        break;
1582
1583    case MULTICAST_ROUTING_MODE_IPV6:
1584
1585        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1586            memcpy(&key->ipv6.addr.src,
1587                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1588                   sizeof(key->ipv6.addr.src));
1589        }
1590
1591        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1592            memcpy(&mask->ipv6.addr.src,
1593                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1594                   sizeof(mask->ipv6.addr.src));
1595        }
1596
1597        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1598            if (mask->ipv6.addr.src.addr32[0] != 0 ||
1599                mask->ipv6.addr.src.addr32[1] != 0 ||
1600                mask->ipv6.addr.src.addr32[2] != 0 ||
1601                mask->ipv6.addr.src.addr32[3] != 0) {
1602                return -ROCKER_EINVAL;
1603            }
1604        }
1605
1606        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1607            return -ROCKER_EINVAL;
1608        }
1609
1610        memcpy(&key->ipv6.addr.dst,
1611               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1612               sizeof(key->ipv6.addr.dst));
1613        if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1614            return -ROCKER_EINVAL;
1615        }
1616
1617        break;
1618
1619    default:
1620        return -ROCKER_EINVAL;
1621    }
1622
1623    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1624        action->goto_tbl =
1625            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1626        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1627            return -ROCKER_EINVAL;
1628        }
1629    }
1630
1631    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1632        action->write.group_id =
1633            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1634        if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1635            ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1636            return -ROCKER_EINVAL;
1637        }
1638        action->write.vlan_id = key->eth.vlan_id;
1639    }
1640
1641    return ROCKER_OK;
1642}
1643
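/* Fill in the IP protocol and ToS (DSCP plus ECN) match fields shared
 * by the IPv4 and IPv6 ACL modes.
 */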
1644static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1645                                 RockerTlv **flow_tlvs)
1646{
1647    key->width = FLOW_KEY_WIDTH(ip.tos);
1648
1649    key->ip.proto = 0;
1650    key->ip.tos = 0;
1651    mask->ip.proto = 0;
1652    mask->ip.tos = 0;
1653
1654    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1655        key->ip.proto =
1656            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1657    }
1658    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1659        mask->ip.proto =
1660            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1661    }
1662    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1663        key->ip.tos =
1664            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1665    }
1666    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1667        mask->ip.tos =
1668            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1669    }
1670    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1671        key->ip.tos |=
1672            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1673    }
1674    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1675        mask->ip.tos |=
1676            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1677    }
1678
1679    return ROCKER_OK;
1680}
1681
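/* Parse a flow add/modify for the ACL policy table.  IN_PPORT and
 * ETHERTYPE are mandatory, a VLAN match and a tunnel match are mutually
 * exclusive, and only the VLAN-based modes are supported so far.
 */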
1682static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1683{
1684    OfDpaFlowKey *key = &flow->key;
1685    OfDpaFlowKey *mask = &flow->mask;
1686    OfDpaFlowAction *action = &flow->action;
1687    enum {
1688        ACL_MODE_UNKNOWN,
1689        ACL_MODE_IPV4_VLAN,
1690        ACL_MODE_IPV6_VLAN,
1691        ACL_MODE_IPV4_TENANT,
1692        ACL_MODE_IPV6_TENANT,
1693        ACL_MODE_NON_IP_VLAN,
1694        ACL_MODE_NON_IP_TENANT,
1695        ACL_MODE_ANY_VLAN,
1696        ACL_MODE_ANY_TENANT,
1697    } mode = ACL_MODE_UNKNOWN;
1698    int err = ROCKER_OK;
1699
1700    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1701        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1702        return -ROCKER_EINVAL;
1703    }
1704
1705    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
1706        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1707        return -ROCKER_EINVAL;
1708    }
1709
1710    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1711    key->width = FLOW_KEY_WIDTH(eth.type);
1712
1713    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1714    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1715        mask->in_pport =
1716            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1717    }
1718
1719    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
1720        memcpy(key->eth.src.a,
1721               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
1722               sizeof(key->eth.src.a));
1723    }
1724
1725    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
1726        memcpy(mask->eth.src.a,
1727               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
1728               sizeof(mask->eth.src.a));
1729    }
1730
1731    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1732        memcpy(key->eth.dst.a,
1733               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1734               sizeof(key->eth.dst.a));
1735    }
1736
1737    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1738        memcpy(mask->eth.dst.a,
1739               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1740               sizeof(mask->eth.dst.a));
1741    }
1742
1743    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1744    if (key->eth.type) {
1745        mask->eth.type = 0xffff;
1746    }
1747
1748    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1749        key->eth.vlan_id =
1750            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1751    }
1752
1753    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1754        mask->eth.vlan_id =
1755            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1756    }
1757
1758    switch (ntohs(key->eth.type)) {
1759    case 0x0000:
1760        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
1761        break;
1762    case 0x0800:
1763        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
1764        break;
1765    case 0x86dd:
1766        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
1767        break;
1768    default:
1769        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
1770                                    ACL_MODE_NON_IP_TENANT;
1771        break;
1772    }
1773
1774    /* XXX only supporting VLAN modes for now */
1775    if (mode != ACL_MODE_IPV4_VLAN &&
1776        mode != ACL_MODE_IPV6_VLAN &&
1777        mode != ACL_MODE_NON_IP_VLAN &&
1778        mode != ACL_MODE_ANY_VLAN) {
1779        return -ROCKER_EINVAL;
1780    }
1781
1782    switch (ntohs(key->eth.type)) {
1783    case 0x0800:
1784    case 0x86dd:
1785        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
1786        break;
1787    }
1788
1789    if (err) {
1790        return err;
1791    }
1792
1793    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1794        action->write.group_id =
1795            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1796    }
1797
1798    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1799        action->apply.copy_to_cpu =
1800            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1801    }
1802
1803    return ROCKER_OK;
1804}
1805
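/* Common parsing for flow add and modify: pull the table id, priority
 * and timeouts out of the TLVs (an idle timeout is rejected for the
 * ingress port, VLAN and termination MAC tables), then hand off to the
 * per-table parser.
 */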
1806static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
1807                                   RockerTlv **flow_tlvs)
1808{
1809    enum rocker_of_dpa_table_id tbl;
1810    int err = ROCKER_OK;
1811
1812    if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
1813        !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
1814        !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
1815        return -ROCKER_EINVAL;
1816    }
1817
1818    tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
1819    flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
1820    flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);
1821
1822    if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
1823        if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
1824            tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
1825            tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1826            return -ROCKER_EINVAL;
1827        }
1828        flow->idletime =
1829            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
1830    }
1831
1832    switch (tbl) {
1833    case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1834        err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
1835        break;
1836    case ROCKER_OF_DPA_TABLE_ID_VLAN:
1837        err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
1838        break;
1839    case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1840        err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
1841        break;
1842    case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1843        err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
1844        break;
1845    case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1846        err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
1847        break;
1848    case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
1849        err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
1850        break;
1851    case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1852        err = of_dpa_cmd_add_acl(flow, flow_tlvs);
1853        break;
1854    }
1855
1856    return err;
1857}
1858
1859static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
1860                               RockerTlv **flow_tlvs)
1861{
1862    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1863    int err = ROCKER_OK;
1864
1865    if (flow) {
1866        return -ROCKER_EEXIST;
1867    }
1868
1869    flow = of_dpa_flow_alloc(cookie);
1870    if (!flow) {
1871        return -ROCKER_ENOMEM;
1872    }
1873
1874    err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1875    if (err) {
1876        g_free(flow);
1877        return err;
1878    }
1879
1880    return of_dpa_flow_add(of_dpa, flow);
1881}
1882
1883static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
1884                               RockerTlv **flow_tlvs)
1885{
1886    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1887
1888    if (!flow) {
1889        return -ROCKER_ENOENT;
1890    }
1891
1892    return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1893}
1894
1895static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
1896{
1897    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1898
1899    if (!flow) {
1900        return -ROCKER_ENOENT;
1901    }
1902
1903    of_dpa_flow_del(of_dpa, flow);
1904
1905    return ROCKER_OK;
1906}
1907
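/* Return the duration (in seconds) and the rx/tx packet counters for
 * one flow, encoded as TLVs in the descriptor buffer.
 */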
1908static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
1909                                     struct desc_info *info, char *buf)
1910{
1911    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1912    size_t tlv_size;
1913    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
1914    int pos;
1915
1916    if (!flow) {
1917        return -ROCKER_ENOENT;
1918    }
1919
1920    tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) +  /* duration */
1921               rocker_tlv_total_size(sizeof(uint64_t)) +  /* rx_pkts */
1922               rocker_tlv_total_size(sizeof(uint64_t));   /* tx_pkts */
1923
1924    if (tlv_size > desc_buf_size(info)) {
1925        return -ROCKER_EMSGSIZE;
1926    }
1927
1928    pos = 0;
1929    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
1930                        (int32_t)(now - flow->stats.install_time));
1931    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
1932                        flow->stats.rx_pkts);
1933    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
1934                        flow->stats.tx_pkts);
1935
1936    return desc_set_buf(info, tlv_size);
1937}
1938
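/* Flow command dispatcher: every flow command carries a 64-bit cookie
 * identifying the flow table entry to add, modify, delete or query.
 */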
1939static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
1940                           char *buf, uint16_t cmd,
1941                           RockerTlv **flow_tlvs)
1942{
1943    uint64_t cookie;
1944
1945    if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
1946        return -ROCKER_EINVAL;
1947    }
1948
1949    cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);
1950
1951    switch (cmd) {
1952    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
1953        return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
1954    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
1955        return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
1956    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
1957        return of_dpa_cmd_flow_del(of_dpa, cookie);
1958    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
1959        return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
1960    }
1961
1962    return -ROCKER_ENOTSUP;
1963}
1964
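/* L2 interface group: an output physical port plus a pop-VLAN flag. */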
1965static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1966                                       RockerTlv **group_tlvs)
1967{
1968    if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1969        !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1970        return -ROCKER_EINVAL;
1971    }
1972
1973    group->l2_interface.out_pport =
1974        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1975    group->l2_interface.pop_vlan =
1976        rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1977
1978    return ROCKER_OK;
1979}
1980
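/* L2 rewrite group: chains to an existing L2 interface group and can
 * rewrite the source/destination MAC and the VLAN; a rewritten VLAN
 * must match the VLAN encoded in the chained group's id.
 */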
1981static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
1982                                     RockerTlv **group_tlvs)
1983{
1984    OfDpaGroup *l2_interface_group;
1985
1986    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
1987        return -ROCKER_EINVAL;
1988    }
1989
1990    group->l2_rewrite.group_id =
1991        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
1992
1993    l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
1994    if (!l2_interface_group ||
1995        ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
1996                              ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1997        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
1998        return -ROCKER_EINVAL;
1999    }
2000
2001    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2002        memcpy(group->l2_rewrite.src_mac.a,
2003               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2004               sizeof(group->l2_rewrite.src_mac.a));
2005    }
2006
2007    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2008        memcpy(group->l2_rewrite.dst_mac.a,
2009               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2010               sizeof(group->l2_rewrite.dst_mac.a));
2011    }
2012
2013    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2014        group->l2_rewrite.vlan_id =
2015            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2016        if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
2017            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
2018            DPRINTF("Set VLAN ID must be same as L2 interface group\n");
2019            return -ROCKER_EINVAL;
2020        }
2021    }
2022
2023    return ROCKER_OK;
2024}
2025
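/* L2 flood/multicast group: a list of lower-level group ids to
 * replicate the packet to; see the VLAN consistency check below.
 */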
2026static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
2027                                   RockerTlv **group_tlvs)
2028{
2029    OfDpaGroup *l2_group;
2030    RockerTlv **tlvs;
2031    int err;
2032    int i;
2033
2034    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
2035        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
2036        return -ROCKER_EINVAL;
2037    }
2038
2039    group->l2_flood.group_count =
2040        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);
2041
2042    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);
2043    if (!tlvs) {
2044        return -ROCKER_ENOMEM;
2045    }
2046
2047    g_free(group->l2_flood.group_ids);
2048    group->l2_flood.group_ids =
2049        g_new0(uint32_t, group->l2_flood.group_count);
2050    if (!group->l2_flood.group_ids) {
2051        err = -ROCKER_ENOMEM;
2052        goto err_out;
2053    }
2054
2055    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
2056                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);
2057
2058    for (i = 0; i < group->l2_flood.group_count; i++) {
2059        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
2060    }
2061
2062    /* All of the L2 interface groups referenced by the L2 flood
2063     * group must have the same VLAN.
2064     */
2065
2066    for (i = 0; i < group->l2_flood.group_count; i++) {
2067        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
2068        if (!l2_group) {
2069            continue;
2070        }
2071        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
2072             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
2073            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
2074             ROCKER_GROUP_VLAN_GET(group->id))) {
2075            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
2076                    "flood group 0x%08x\n",
2077                    group->l2_flood.group_ids[i], group->id);
2078            err = -ROCKER_EINVAL;
2079            goto err_out;
2080        }
2081    }
2082
2083    g_free(tlvs);
2084    return ROCKER_OK;
2085
2086err_out:
2087    group->l2_flood.group_count = 0;
2088    g_free(group->l2_flood.group_ids);
        group->l2_flood.group_ids = NULL;
2089    g_free(tlvs);
2090
2091    return err;
2092}
2093
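/* L3 unicast group: next-hop rewrite (source/destination MAC, VLAN,
 * optional TTL check) chained to a lower-level group.
 */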
2094static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
2095{
2096    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
2097        return -ROCKER_EINVAL;
2098    }
2099
2100    group->l3_unicast.group_id =
2101        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
2102
2103    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2104        memcpy(group->l3_unicast.src_mac.a,
2105               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2106               sizeof(group->l3_unicast.src_mac.a));
2107    }
2108
2109    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2110        memcpy(group->l3_unicast.dst_mac.a,
2111               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2112               sizeof(group->l3_unicast.dst_mac.a));
2113    }
2114
2115    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2116        group->l3_unicast.vlan_id =
2117            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2118    }
2119
2120    if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
2121        group->l3_unicast.ttl_check =
2122            rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
2123    }
2124
2125    return ROCKER_OK;
2126}
2127
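/* Dispatch group add/modify parsing on the type encoded in the
 * group id.
 */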
2128static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
2129                               OfDpaGroup *group, RockerTlv **group_tlvs)
2130{
2131    uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);
2132
2133    switch (type) {
2134    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2135        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
2136    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2137        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
2138    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2139    /* Treat an L2 multicast group the same as an L2 flood group */
2140    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2141        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
2142    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2143        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
2144    }
2145
2146    return -ROCKER_ENOTSUP;
2147}
2148
2149static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
2150                                RockerTlv **group_tlvs)
2151{
2152    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2153    int err;
2154
2155    if (group) {
2156        return -ROCKER_EEXIST;
2157    }
2158
2159    group = of_dpa_group_alloc(group_id);
2160    if (!group) {
2161        return -ROCKER_ENOMEM;
2162    }
2163
2164    err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2165    if (err) {
2166        goto err_cmd_add;
2167    }
2168
2169    err = of_dpa_group_add(of_dpa, group);
2170    if (err) {
2171        goto err_cmd_add;
2172    }
2173
2174    return ROCKER_OK;
2175
2176err_cmd_add:
2177    g_free(group);
2178    return err;
2179}
2180
2181static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
2182                                RockerTlv **group_tlvs)
2183{
2184    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2185
2186    if (!group) {
2187        return -ROCKER_ENOENT;
2188    }
2189
2190    return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2191}
2192
2193static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
2194{
2195    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2196
2197    if (!group) {
2198        return -ROCKER_ENOENT;
2199    }
2200
2201    return of_dpa_group_del(of_dpa, group);
2202}
2203
2204static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
2205                                      struct desc_info *info, char *buf)
2206{
2207    return -ROCKER_ENOTSUP;
2208}
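
/*
 * Group stats are not implemented yet.  A minimal sketch of what a
 * reply could look like, mirroring of_dpa_cmd_flow_get_stats() above,
 * is shown here; the ROCKER_TLV_OF_DPA_GROUP_STAT_REFS type and the
 * ref_count field are hypothetical and not part of rocker_hw.h:
 *
 *     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
 *     size_t tlv_size = rocker_tlv_total_size(sizeof(uint32_t));
 *     int pos = 0;
 *
 *     if (!group) {
 *         return -ROCKER_ENOENT;
 *     }
 *     if (tlv_size > desc_buf_size(info)) {
 *         return -ROCKER_EMSGSIZE;
 *     }
 *     rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_GROUP_STAT_REFS,
 *                         group->ref_count);
 *     return desc_set_buf(info, tlv_size);
 */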
2209
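/* Group command dispatcher: every group command carries a 32-bit group
 * id identifying the entry to add, modify, delete or query.
 */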
2210static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
2211                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
2212{
2213    uint32_t group_id;
2214
2215    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
2216        return -ROCKER_EINVAL;
2217    }
2218
2219    group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
2220
2221    switch (cmd) {
2222    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2223        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
2224    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2225        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
2226    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2227        return of_dpa_cmd_group_del(of_dpa, group_id);
2228    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2229        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
2230    }
2231
2232    return -ROCKER_ENOTSUP;
2233}
2234
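/* OF-DPA world command entry point: parse the nested cmd-info TLV and
 * route the command to the flow or group handlers above.
 */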
2235static int of_dpa_cmd(World *world, struct desc_info *info,
2236                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
2237{
2238    OfDpa *of_dpa = world_private(world);
2239    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];
2240
2241    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);
2242
2243    switch (cmd) {
2244    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
2245    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
2246    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
2247    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
2248        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
2249    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2250    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2251    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2252    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2253        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
2254    }
2255
2256    return -ROCKER_ENOTSUP;
2257}
2258
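/* GHashTable hash/equal callbacks for the flow table, which is keyed
 * by the flow's 64-bit cookie.
 */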
2259static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
2260{
2261    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
2262}
2263
2264static guint rocker_int64_hash(gconstpointer v)
2265{
2266    return (guint)*(const uint64_t *)v;
2267}
2268
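/* World init: create the flow table (keyed by the 64-bit cookie) and
 * the group table (keyed by the 32-bit group id); entries are g_free'd
 * by the tables when they are removed.
 */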
2269static int of_dpa_init(World *world)
2270{
2271    OfDpa *of_dpa = world_private(world);
2272
2273    of_dpa->world = world;
2274
2275    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
2276                                             rocker_int64_equal,
2277                                             NULL, g_free);
2278    if (!of_dpa->flow_tbl) {
2279        return -ENOMEM;
2280    }
2281
2282    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
2283                                              NULL, g_free);
2284    if (!of_dpa->group_tbl) {
2285        goto err_group_tbl;
2286    }
2287
2288    /* XXX hardcode some artificial table max values */
2289    of_dpa->flow_tbl_max_size = 100;
2290    of_dpa->group_tbl_max_size = 100;
2291
2292    return 0;
2293
2294err_group_tbl:
2295    g_hash_table_destroy(of_dpa->flow_tbl);
2296    return -ENOMEM;
2297}
2298
2299static void of_dpa_uninit(World *world)
2300{
2301    OfDpa *of_dpa = world_private(world);
2302
2303    g_hash_table_destroy(of_dpa->group_tbl);
2304    g_hash_table_destroy(of_dpa->flow_tbl);
2305}
2306
2307struct of_dpa_flow_fill_context {
2308    RockerOfDpaFlowList *list;
2309    uint32_t tbl_id;
2310};
2311
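/* g_hash_table_foreach() callback: convert one internal flow entry into
 * a RockerOfDpaFlow for the QMP flow query, honouring the optional
 * table id filter (-1 means "all tables").
 */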
2312static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
2313{
2314    struct of_dpa_flow *flow = value;
2315    struct of_dpa_flow_key *key = &flow->key;
2316    struct of_dpa_flow_key *mask = &flow->mask;
2317    struct of_dpa_flow_fill_context *flow_context = user_data;
2318    RockerOfDpaFlowList *new;
2319    RockerOfDpaFlow *nflow;
2320    RockerOfDpaFlowKey *nkey;
2321    RockerOfDpaFlowMask *nmask;
2322    RockerOfDpaFlowAction *naction;
2323
2324    if (flow_context->tbl_id != -1 &&
2325        flow_context->tbl_id != key->tbl_id) {
2326        return;
2327    }
2328
2329    new = g_malloc0(sizeof(*new));
2330    nflow = new->value = g_malloc0(sizeof(*nflow));
2331    nkey = nflow->key = g_malloc0(sizeof(*nkey));
2332    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
2333    naction = nflow->action = g_malloc0(sizeof(*naction));
2334
2335    nflow->cookie = flow->cookie;
2336    nflow->hits = flow->stats.hits;
2337    nkey->priority = flow->priority;
2338    nkey->tbl_id = key->tbl_id;
2339
2340    if (key->in_pport || mask->in_pport) {
2341        nkey->has_in_pport = true;
2342        nkey->in_pport = key->in_pport;
2343    }
2344
2345    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
2346        nmask->has_in_pport = true;
2347        nmask->in_pport = mask->in_pport;
2348    }
2349
2350    if (key->eth.vlan_id || mask->eth.vlan_id) {
2351        nkey->has_vlan_id = true;
2352        nkey->vlan_id = ntohs(key->eth.vlan_id);
2353    }
2354
2355    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
2356        nmask->has_vlan_id = true;
2357        nmask->vlan_id = ntohs(mask->eth.vlan_id);
2358    }
2359
2360    if (key->tunnel_id || mask->tunnel_id) {
2361        nkey->has_tunnel_id = true;
2362        nkey->tunnel_id = key->tunnel_id;
2363    }
2364
2365    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
2366        nmask->has_tunnel_id = true;
2367        nmask->tunnel_id = mask->tunnel_id;
2368    }
2369
2370    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
2371        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
2372        nkey->has_eth_src = true;
2373        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
2374    }
2375
2376    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
2377        nmask->has_eth_src = true;
2378        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
2379    }
2380
2381    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
2382        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
2383        nkey->has_eth_dst = true;
2384        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
2385    }
2386
2387    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
2388        nmask->has_eth_dst = true;
2389        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
2390    }
2391
2392    if (key->eth.type) {
2393
2394        nkey->has_eth_type = true;
2395        nkey->eth_type = ntohs(key->eth.type);
2396
2397        switch (ntohs(key->eth.type)) {
2398        case 0x0800:
2399        case 0x86dd:
2400            if (key->ip.proto || mask->ip.proto) {
2401                nkey->has_ip_proto = true;
2402                nkey->ip_proto = key->ip.proto;
2403            }
2404            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
2405                nmask->has_ip_proto = true;
2406                nmask->ip_proto = mask->ip.proto;
2407            }
2408            if (key->ip.tos || mask->ip.tos) {
2409                nkey->has_ip_tos = true;
2410                nkey->ip_tos = key->ip.tos;
2411            }
2412            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
2413                nmask->has_ip_tos = true;
2414                nmask->ip_tos = mask->ip.tos;
2415            }
2416            break;
2417        }
2418
2419        switch (ntohs(key->eth.type)) {
2420        case 0x0800:
2421            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
2422                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
2423                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
2424                nkey->has_ip_dst = true;
2425                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
2426            }
2427            break;
2428        }
2429    }
2430
2431    if (flow->action.goto_tbl) {
2432        naction->has_goto_tbl = true;
2433        naction->goto_tbl = flow->action.goto_tbl;
2434    }
2435
2436    if (flow->action.write.group_id) {
2437        naction->has_group_id = true;
2438        naction->group_id = flow->action.write.group_id;
2439    }
2440
2441    if (flow->action.apply.new_vlan_id) {
2442        naction->has_new_vlan_id = true;
2443        naction->new_vlan_id = flow->action.apply.new_vlan_id;
2444    }
2445
2446    new->next = flow_context->list;
2447    flow_context->list = new;
2448}
2449
2450RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
2451                                                   bool has_tbl_id,
2452                                                   uint32_t tbl_id,
2453                                                   Error **errp)
2454{
2455    struct rocker *r;
2456    struct world *w;
2457    struct of_dpa *of_dpa;
2458    struct of_dpa_flow_fill_context fill_context = {
2459        .list = NULL,
2460        .tbl_id = has_tbl_id ? tbl_id : -1,
2461    };
2462
2463    r = rocker_find(name);
2464    if (!r) {
2465        error_setg(errp, "rocker %s not found", name);
2466        return NULL;
2467    }
2468
2469    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2470    if (!w) {
2471        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2472        return NULL;
2473    }
2474
2475    of_dpa = world_private(w);
2476
2477    g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
2478
2479    return fill_context.list;
2480}
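
/* Usage sketch: this handler backs the QMP command
 * "query-rocker-of-dpa-flows", e.g.
 *
 *   { "execute": "query-rocker-of-dpa-flows",
 *     "arguments": { "name": "sw1" } }
 *
 * The switch name "sw1" is only an example, and the exact spelling of
 * the optional table-id argument is defined by the QAPI schema, not
 * here.
 */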
2481
2482struct of_dpa_group_fill_context {
2483    RockerOfDpaGroupList *list;
2484    uint8_t type;
2485};
2486
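/* g_hash_table_foreach() callback: convert one internal group entry
 * into a RockerOfDpaGroup for the QMP group query.  A type value of 9,
 * which no defined OF-DPA group type uses, acts as an "all types"
 * sentinel.
 */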
2487static void of_dpa_group_fill(void *key, void *value, void *user_data)
2488{
2489    struct of_dpa_group *group = value;
2490    struct of_dpa_group_fill_context *flow_context = user_data;
2491    RockerOfDpaGroupList *new;
2492    RockerOfDpaGroup *ngroup;
2493    struct uint32List *id;
2494    int i;
2495
2496    if (flow_context->type != 9 &&
2497        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
2498        return;
2499    }
2500
2501    new = g_malloc0(sizeof(*new));
2502    ngroup = new->value = g_malloc0(sizeof(*ngroup));
2503
2504    ngroup->id = group->id;
2505
2506    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);
2507
2508    switch (ngroup->type) {
2509    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2510        ngroup->has_vlan_id = true;
2511        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
2512        ngroup->has_pport = true;
2513        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
2514        ngroup->has_out_pport = true;
2515        ngroup->out_pport = group->l2_interface.out_pport;
2516        ngroup->has_pop_vlan = true;
2517        ngroup->pop_vlan = group->l2_interface.pop_vlan;
2518        break;
2519    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2520        ngroup->has_index = true;
2521        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
2522        ngroup->has_group_id = true;
2523        ngroup->group_id = group->l2_rewrite.group_id;
2524        if (group->l2_rewrite.vlan_id) {
2525            ngroup->has_set_vlan_id = true;
2526            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
2527        }
2528        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
2529            ngroup->has_set_eth_src = true;
2530            ngroup->set_eth_src =
2531                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
2532        }
2533        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
2534            ngroup->has_set_eth_dst = true;
2535            ngroup->set_eth_dst =
2536                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
2537        }
2538        break;
2539    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2540    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2541        ngroup->has_vlan_id = true;
2542        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
2543        ngroup->has_index = true;
2544        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
2545        for (i = 0; i < group->l2_flood.group_count; i++) {
2546            ngroup->has_group_ids = true;
2547            id = g_malloc0(sizeof(*id));
2548            id->value = group->l2_flood.group_ids[i];
2549            id->next = ngroup->group_ids;
2550            ngroup->group_ids = id;
2551        }
2552        break;
2553    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2554        ngroup->has_index = true;
2555        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
2556        ngroup->has_group_id = true;
2557        ngroup->group_id = group->l3_unicast.group_id;
2558        if (group->l3_unicast.vlan_id) {
2559            ngroup->has_set_vlan_id = true;
2560            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
2561        }
2562        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
2563            ngroup->has_set_eth_src = true;
2564            ngroup->set_eth_src =
2565                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
2566        }
2567        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
2568            ngroup->has_set_eth_dst = true;
2569            ngroup->set_eth_dst =
2570                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
2571        }
2572        if (group->l3_unicast.ttl_check) {
2573            ngroup->has_ttl_check = true;
2574            ngroup->ttl_check = group->l3_unicast.ttl_check;
2575        }
2576        break;
2577    }
2578
2579    new->next = flow_context->list;
2580    flow_context->list = new;
2581}
2582
2583RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
2584                                                     bool has_type,
2585                                                     uint8_t type,
2586                                                     Error **errp)
2587{
2588    struct rocker *r;
2589    struct world *w;
2590    struct of_dpa *of_dpa;
2591    struct of_dpa_group_fill_context fill_context = {
2592        .list = NULL,
2593        .type = has_type ? type : 9,
2594    };
2595
2596    r = rocker_find(name);
2597    if (!r) {
2598        error_setg(errp, "rocker %s not found", name);
2599        return NULL;
2600    }
2601
2602    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2603    if (!w) {
2604        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2605        return NULL;
2606    }
2607
2608    of_dpa = world_private(w);
2609
2610    g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
2611
2612    return fill_context.list;
2613}
2614
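/* Hook the OF-DPA init/uninit, ingress processing and command handlers
 * into the generic rocker world layer.
 */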
2615static WorldOps of_dpa_ops = {
2616    .name = "ofdpa",
2617    .init = of_dpa_init,
2618    .uninit = of_dpa_uninit,
2619    .ig = of_dpa_ig,
2620    .cmd = of_dpa_cmd,
2621};
2622
2623World *of_dpa_world_alloc(Rocker *r)
2624{
2625    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
2626}
2627