/*
 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
 *                                              implementation
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
#include <net/arp.h>

#include "rocker.h"
#include "rocker_tlv.h"

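/* Flow table entry key.  One union member per OF-DPA table (ingress
 * port, VLAN, termination MAC, unicast routing, bridging and ACL
 * policy).  Entries are hashed on a CRC32 of this key.
 */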
struct ofdpa_flow_tbl_key {
        u32 priority;
        enum rocker_of_dpa_table_id tbl_id;
        union {
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                } ig_port;
                struct {
                        u32 in_pport;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool untagged;
                        __be16 new_vlan_id;
                } vlan;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        __be16 eth_type;
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool copy_to_cpu;
                } term_mac;
                struct {
                        __be16 eth_type;
                        __be32 dst4;
                        __be32 dst4_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                } ucast_routing;
                struct {
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        int has_eth_dst;
                        int has_eth_dst_mask;
                        __be16 vlan_id;
                        u32 tunnel_id;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                        bool copy_to_cpu;
                } bridge;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        u8 eth_src[ETH_ALEN];
                        u8 eth_src_mask[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 eth_type;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        u8 ip_proto;
                        u8 ip_proto_mask;
                        u8 ip_tos;
                        u8 ip_tos_mask;
                        u32 group_id;
                } acl;
        };
};

struct ofdpa_flow_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u64 cookie;
        struct ofdpa_flow_tbl_key key;
        size_t key_len;
        u32 key_crc32; /* key */
        struct fib_info *fi;
};

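/* Group table entry.  group_id encodes the group type and, for L2
 * interface groups, the output pport and VLAN; flood/mcast groups
 * carry an array of referenced group IDs.
 */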
struct ofdpa_group_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u32 group_id; /* key */
        u16 group_count;
        u32 *group_ids;
        union {
                struct {
                        u8 pop_vlan;
                } l2_interface;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        u32 group_id;
                } l2_rewrite;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        bool ttl_check;
                        u32 group_id;
                } l3_unicast;
        };
};

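/* FDB entry, keyed on a CRC32 of {port, MAC, VLAN}.  'learned'
 * marks entries reported by the device rather than configured by
 * the user; 'touched' records when the entry was last refreshed.
 */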
struct ofdpa_fdb_tbl_entry {
        struct hlist_node entry;
        u32 key_crc32; /* key */
        bool learned;
        unsigned long touched;
        struct ofdpa_fdb_tbl_key {
                struct ofdpa_port *ofdpa_port;
                u8 addr[ETH_ALEN];
                __be16 vlan_id;
        } key;
};

struct ofdpa_internal_vlan_tbl_entry {
        struct hlist_node entry;
        int ifindex; /* key */
        u32 ref_count;
        __be16 vlan_id;
};

struct ofdpa_neigh_tbl_entry {
        struct hlist_node entry;
        __be32 ip_addr; /* key */
        struct net_device *dev;
        u32 ref_count;
        u32 index;
        u8 eth_dst[ETH_ALEN];
        bool ttl_check;
};

enum {
        OFDPA_CTRL_LINK_LOCAL_MCAST,
        OFDPA_CTRL_LOCAL_ARP,
        OFDPA_CTRL_IPV4_MCAST,
        OFDPA_CTRL_IPV6_MCAST,
        OFDPA_CTRL_DFLT_BRIDGING,
        OFDPA_CTRL_DFLT_OVS,
        OFDPA_CTRL_MAX,
};

#define OFDPA_INTERNAL_VLAN_ID_BASE     0x0f00
#define OFDPA_N_INTERNAL_VLANS          255
#define OFDPA_VLAN_BITMAP_LEN           BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN  BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0

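/* Per-switch private state: software shadows of the flow, group,
 * FDB, internal VLAN and neighbour tables, each protected by its
 * own spinlock.
 */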
struct ofdpa {
        struct rocker *rocker;
        DECLARE_HASHTABLE(flow_tbl, 16);
        spinlock_t flow_tbl_lock;               /* for flow tbl accesses */
        u64 flow_tbl_next_cookie;
        DECLARE_HASHTABLE(group_tbl, 16);
        spinlock_t group_tbl_lock;              /* for group tbl accesses */
        struct timer_list fdb_cleanup_timer;
        DECLARE_HASHTABLE(fdb_tbl, 16);
        spinlock_t fdb_tbl_lock;                /* for fdb tbl accesses */
        unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
        DECLARE_HASHTABLE(internal_vlan_tbl, 8);
        spinlock_t internal_vlan_tbl_lock;      /* for vlan tbl accesses */
        DECLARE_HASHTABLE(neigh_tbl, 16);
        spinlock_t neigh_tbl_lock;              /* for neigh tbl accesses */
        u32 neigh_tbl_next_index;
        unsigned long ageing_time;
        bool fib_aborted;
};

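/* Per-port private state.  internal_vlan_id is the VLAN used for
 * untagged traffic on this port; vlan_bitmap tracks the VIDs the
 * port is a member of.
 */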
struct ofdpa_port {
        struct ofdpa *ofdpa;
        struct rocker_port *rocker_port;
        struct net_device *dev;
        u32 pport;
        struct net_device *bridge_dev;
        __be16 internal_vlan_id;
        int stp_state;
        u32 brport_flags;
        unsigned long ageing_time;
        bool ctrls[OFDPA_CTRL_MAX];
        unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
        OFDPA_PRIORITY_UNKNOWN = 0,
        OFDPA_PRIORITY_IG_PORT = 1,
        OFDPA_PRIORITY_VLAN = 1,
        OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
        OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
        OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
        OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
        OFDPA_PRIORITY_BRIDGING_VLAN = 3,
        OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
        OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
        OFDPA_PRIORITY_BRIDGING_TENANT = 3,
        OFDPA_PRIORITY_ACL_CTRL = 3,
        OFDPA_PRIORITY_ACL_NORMAL = 2,
        OFDPA_PRIORITY_ACL_DFLT = 1,
};

static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
{
        u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
        u16 end = 0xffe;
        u16 _vlan_id = ntohs(vlan_id);

        return (_vlan_id >= start && _vlan_id <= end);
}

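/* Map a VID to the VLAN ID used in hardware.  VID 0 (untagged)
 * maps to the port's internal VLAN; in that case *pop_vlan is set
 * so the caller knows the tag must not appear on the wire.
 */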
static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
                                     u16 vid, bool *pop_vlan)
{
        __be16 vlan_id;

        if (pop_vlan)
                *pop_vlan = false;
        vlan_id = htons(vid);
        if (!vlan_id) {
                vlan_id = ofdpa_port->internal_vlan_id;
                if (pop_vlan)
                        *pop_vlan = true;
        }

        return vlan_id;
}

static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
                                  __be16 vlan_id)
{
        if (ofdpa_vlan_id_is_internal(vlan_id))
                return 0;

        return ntohs(vlan_id);
}

static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
                                const char *kind)
{
        return ofdpa_port->bridge_dev &&
                !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}

static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
        return ofdpa_port_is_slave(ofdpa_port, "bridge");
}

static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
        return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}

#define OFDPA_OP_FLAG_REMOVE            BIT(0)
#define OFDPA_OP_FLAG_NOWAIT            BIT(1)
#define OFDPA_OP_FLAG_LEARNED           BIT(2)
#define OFDPA_OP_FLAG_REFRESH           BIT(3)

static bool ofdpa_flags_nowait(int flags)
{
        return flags & OFDPA_OP_FLAG_NOWAIT;
}

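/* Transaction-aware allocator.  A switchdev_trans_item header is
 * placed in front of the requested buffer so the same chunk can be
 * queued on the transaction during prepare and dequeued again
 * during commit; callers only see the buffer that follows it.
 */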
static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
                               size_t size)
{
        struct switchdev_trans_item *elem = NULL;
        gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
                          GFP_ATOMIC : GFP_KERNEL;

        /* If in transaction prepare phase, allocate the memory
         * and enqueue it on a transaction.  If in transaction
         * commit phase, dequeue the memory from the transaction
         * rather than re-allocating the memory.  The idea is the
         * driver code paths for prepare and commit are identical
         * so the memory allocated in the prepare phase is the
         * memory used in the commit phase.
         */

        if (!trans) {
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
        } else if (switchdev_trans_ph_prepare(trans)) {
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
                if (!elem)
                        return NULL;
                switchdev_trans_item_enqueue(trans, elem, kfree, elem);
        } else {
                elem = switchdev_trans_item_dequeue(trans);
        }

        return elem ? elem + 1 : NULL;
}

static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
                           size_t size)
{
        return __ofdpa_mem_alloc(trans, flags, size);
}

static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
                           size_t n, size_t size)
{
        return __ofdpa_mem_alloc(trans, flags, n * size);
}

static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
        struct switchdev_trans_item *elem;

        /* Frees are ignored if in transaction prepare phase.  The
         * memory remains on the per-port list until freed in the
         * commit phase.
         */

        if (switchdev_trans_ph_prepare(trans))
                return;

        elem = (struct switchdev_trans_item *) mem - 1;
        kfree(elem);
}

/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/

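/* The ofdpa_cmd_*() helpers below fill a rocker descriptor with the
 * TLVs for one command.  Each returns -EMSGSIZE if a TLV does not
 * fit in the descriptor.
 */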
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
                               const struct ofdpa_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.ig_port.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.ig_port.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ig_port.goto_tbl))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
                            const struct ofdpa_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.vlan.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.vlan.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.vlan.vlan_id_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.vlan.goto_tbl))
                return -EMSGSIZE;
        if (entry->key.vlan.untagged &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
                                entry->key.vlan.new_vlan_id))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
                                const struct ofdpa_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.term_mac.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.term_mac.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.term_mac.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.term_mac.eth_dst))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.term_mac.eth_dst_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.term_mac.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.term_mac.vlan_id_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.term_mac.goto_tbl))
                return -EMSGSIZE;
        if (entry->key.term_mac.copy_to_cpu &&
            rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
                              entry->key.term_mac.copy_to_cpu))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
                                     const struct ofdpa_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.ucast_routing.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
                                entry->key.ucast_routing.dst4))
                return -EMSGSIZE;
        if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
                                entry->key.ucast_routing.dst4_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ucast_routing.goto_tbl))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.ucast_routing.group_id))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
                              const struct ofdpa_flow_tbl_entry *entry)
{
        if (entry->key.bridge.has_eth_dst &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.bridge.eth_dst))
                return -EMSGSIZE;
        if (entry->key.bridge.has_eth_dst_mask &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.bridge.eth_dst_mask))
                return -EMSGSIZE;
        if (entry->key.bridge.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.bridge.vlan_id))
                return -EMSGSIZE;
        if (entry->key.bridge.tunnel_id &&
            rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
                               entry->key.bridge.tunnel_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.bridge.goto_tbl))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.bridge.group_id))
                return -EMSGSIZE;
        if (entry->key.bridge.copy_to_cpu &&
            rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
                              entry->key.bridge.copy_to_cpu))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
                           const struct ofdpa_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.acl.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.acl.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->key.acl.eth_src))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
                           ETH_ALEN, entry->key.acl.eth_src_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.acl.eth_dst))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.acl.eth_dst_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.acl.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.acl.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.acl.vlan_id_mask))
                return -EMSGSIZE;

        switch (ntohs(entry->key.acl.eth_type)) {
        case ETH_P_IP:
        case ETH_P_IPV6:
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
                                      entry->key.acl.ip_proto))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
                                      entry->key.acl.ip_proto_mask))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
                                      entry->key.acl.ip_tos & 0x3f))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
                                      entry->key.acl.ip_tos_mask & 0x3f))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
                                      (entry->key.acl.ip_tos & 0xc0) >> 6))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
                                      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
                        return -EMSGSIZE;
                break;
        }

        if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
            rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.acl.group_id))
                return -EMSGSIZE;

        return 0;
}

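/* Build the complete flow add/mod command: command type, then a
 * nested CMD_INFO block holding the table ID, priority, cookie and
 * the per-table match/action TLVs from the helpers above.
 */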
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
{
        const struct ofdpa_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
                               entry->key.tbl_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
                               entry->key.priority))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
                return -EMSGSIZE;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
                               entry->cookie))
                return -EMSGSIZE;

        switch (entry->key.tbl_id) {
        case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
                err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_VLAN:
                err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
                err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
                err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
                err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
                err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        if (err)
                return err;

        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
{
        const struct ofdpa_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
                               entry->cookie))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
                                     struct ofdpa_group_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
                               ROCKER_GROUP_PORT_GET(entry->group_id)))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
                              entry->l2_interface.pop_vlan))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
                                   const struct ofdpa_group_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l2_rewrite.group_id))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->l2_rewrite.eth_src))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->l2_rewrite.eth_dst))
                return -EMSGSIZE;
        if (entry->l2_rewrite.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->l2_rewrite.vlan_id))
                return -EMSGSIZE;

        return 0;
}

static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
                                  const struct ofdpa_group_tbl_entry *entry)
{
        int i;
        struct rocker_tlv *group_ids;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
                               entry->group_count))
                return -EMSGSIZE;

        group_ids = rocker_tlv_nest_start(desc_info,
                                          ROCKER_TLV_OF_DPA_GROUP_IDS);
        if (!group_ids)
                return -EMSGSIZE;

        for (i = 0; i < entry->group_count; i++)
                /* Note TLV array is 1-based */
                if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
                        return -EMSGSIZE;

        rocker_tlv_nest_end(desc_info, group_ids);

        return 0;
}

static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
                                   const struct ofdpa_group_tbl_entry *entry)
{
        if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->l3_unicast.eth_src))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->l3_unicast.eth_dst))
                return -EMSGSIZE;
        if (entry->l3_unicast.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->l3_unicast.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
                              entry->l3_unicast.ttl_check))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l3_unicast.group_id))
                return -EMSGSIZE;

        return 0;
}

static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
{
        struct ofdpa_group_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;

        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->group_id))
                return -EMSGSIZE;

        switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
                err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
                err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
                err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
                err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        if (err)
                return err;

        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
{
        const struct ofdpa_group_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->group_id))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/

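/* The software tables below mirror what has been programmed into
 * the device.  Flow entries are keyed on a CRC32 of the key: a
 * lookup hit on add means the entry is modified (FLOW_MOD) and the
 * existing cookie is reused, otherwise a fresh cookie is allocated
 * and the entry is added (FLOW_ADD).
 */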
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
                    const struct ofdpa_flow_tbl_entry *match)
{
        struct ofdpa_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

        hash_for_each_possible(ofdpa->flow_tbl, found,
                               entry, match->key_crc32) {
                if (memcmp(&found->key, &match->key, key_len) == 0)
                        return found;
        }

        return NULL;
}

static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
                              struct switchdev_trans *trans, int flags,
                              struct ofdpa_flow_tbl_entry *match)
{
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

        found = ofdpa_flow_tbl_find(ofdpa, match);

        if (found) {
                match->cookie = found->cookie;
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                ofdpa_kfree(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
                found = match;
                found->cookie = ofdpa->flow_tbl_next_cookie++;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }

        if (!switchdev_trans_ph_prepare(trans))
                hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

        if (!switchdev_trans_ph_prepare(trans))
                return rocker_cmd_exec(ofdpa_port->rocker_port,
                                       ofdpa_flags_nowait(flags),
                                       ofdpa_cmd_flow_tbl_add,
                                       found, NULL, NULL);
        return 0;
}

static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
                              struct switchdev_trans *trans, int flags,
                              struct ofdpa_flow_tbl_entry *match)
{
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;
        int err = 0;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

        found = ofdpa_flow_tbl_find(ofdpa, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }

        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

        ofdpa_kfree(trans, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        err = rocker_cmd_exec(ofdpa_port->rocker_port,
                                              ofdpa_flags_nowait(flags),
                                              ofdpa_cmd_flow_tbl_del,
                                              found, NULL, NULL);
                ofdpa_kfree(trans, found);
        }

        return err;
}

static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
                             struct switchdev_trans *trans, int flags,
                             struct ofdpa_flow_tbl_entry *entry)
{
        if (flags & OFDPA_OP_FLAG_REMOVE)
                return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
        else
                return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
                                  struct switchdev_trans *trans, int flags,
                                  u32 in_pport, u32 in_pport_mask,
                                  enum rocker_of_dpa_table_id goto_tbl)
{
        struct ofdpa_flow_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.priority = OFDPA_PRIORITY_IG_PORT;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
        entry->key.ig_port.in_pport = in_pport;
        entry->key.ig_port.in_pport_mask = in_pport_mask;
        entry->key.ig_port.goto_tbl = goto_tbl;

        return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
                               struct switchdev_trans *trans, int flags,
                               u32 in_pport, __be16 vlan_id,
                               __be16 vlan_id_mask,
                               enum rocker_of_dpa_table_id goto_tbl,
                               bool untagged, __be16 new_vlan_id)
{
        struct ofdpa_flow_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.priority = OFDPA_PRIORITY_VLAN;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
        entry->key.vlan.in_pport = in_pport;
        entry->key.vlan.vlan_id = vlan_id;
        entry->key.vlan.vlan_id_mask = vlan_id_mask;
        entry->key.vlan.goto_tbl = goto_tbl;

        entry->key.vlan.untagged = untagged;
        entry->key.vlan.new_vlan_id = new_vlan_id;

        return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
                                   struct switchdev_trans *trans,
                                   u32 in_pport, u32 in_pport_mask,
                                   __be16 eth_type, const u8 *eth_dst,
                                   const u8 *eth_dst_mask, __be16 vlan_id,
                                   __be16 vlan_id_mask, bool copy_to_cpu,
                                   int flags)
{
        struct ofdpa_flow_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        if (is_multicast_ether_addr(eth_dst)) {
                entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
                entry->key.term_mac.goto_tbl =
                         ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
        } else {
                entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
                entry->key.term_mac.goto_tbl =
                         ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
        }

        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
        entry->key.term_mac.in_pport = in_pport;
        entry->key.term_mac.in_pport_mask = in_pport_mask;
        entry->key.term_mac.eth_type = eth_type;
        ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
        ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
        entry->key.term_mac.vlan_id = vlan_id;
        entry->key.term_mac.vlan_id_mask = vlan_id_mask;
        entry->key.term_mac.copy_to_cpu = copy_to_cpu;

        return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

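/* Bridging entries are prioritised on three properties: whether
 * this is VLAN bridging (vlan_id set) or tenant/tunnel bridging,
 * whether the entry is a default (no eth_dst, or a masked one),
 * and whether the mask is a wildcard rather than an exact
 * ff:ff:ff:ff:ff:ff match.
 */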
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
                                 struct switchdev_trans *trans, int flags,
                                 const u8 *eth_dst, const u8 *eth_dst_mask,
                                 __be16 vlan_id, u32 tunnel_id,
                                 enum rocker_of_dpa_table_id goto_tbl,
                                 u32 group_id, bool copy_to_cpu)
{
        struct ofdpa_flow_tbl_entry *entry;
        u32 priority;
        bool vlan_bridging = !!vlan_id;
        bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
        bool wild = false;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

        if (eth_dst) {
                entry->key.bridge.has_eth_dst = 1;
                ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
        }
        if (eth_dst_mask) {
                entry->key.bridge.has_eth_dst_mask = 1;
                ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
                if (!ether_addr_equal(eth_dst_mask, ff_mac))
                        wild = true;
        }

        priority = OFDPA_PRIORITY_UNKNOWN;
        if (vlan_bridging && dflt && wild)
                priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
        else if (vlan_bridging && dflt && !wild)
                priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
        else if (vlan_bridging && !dflt)
                priority = OFDPA_PRIORITY_BRIDGING_VLAN;
        else if (!vlan_bridging && dflt && wild)
                priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
        else if (!vlan_bridging && dflt && !wild)
                priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
        else if (!vlan_bridging && !dflt)
                priority = OFDPA_PRIORITY_BRIDGING_TENANT;

        entry->key.priority = priority;
        entry->key.bridge.vlan_id = vlan_id;
        entry->key.bridge.tunnel_id = tunnel_id;
        entry->key.bridge.goto_tbl = goto_tbl;
        entry->key.bridge.group_id = group_id;
        entry->key.bridge.copy_to_cpu = copy_to_cpu;

        return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
                                         struct switchdev_trans *trans,
                                         __be16 eth_type, __be32 dst,
                                         __be32 dst_mask, u32 priority,
                                         enum rocker_of_dpa_table_id goto_tbl,
                                         u32 group_id, struct fib_info *fi,
                                         int flags)
{
        struct ofdpa_flow_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
        entry->key.priority = priority;
        entry->key.ucast_routing.eth_type = eth_type;
        entry->key.ucast_routing.dst4 = dst;
        entry->key.ucast_routing.dst4_mask = dst_mask;
        entry->key.ucast_routing.goto_tbl = goto_tbl;
        entry->key.ucast_routing.group_id = group_id;
        entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
                                  ucast_routing.group_id);
        entry->fi = fi;

        return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
                              struct switchdev_trans *trans, int flags,
                              u32 in_pport, u32 in_pport_mask,
                              const u8 *eth_src, const u8 *eth_src_mask,
                              const u8 *eth_dst, const u8 *eth_dst_mask,
                              __be16 eth_type, __be16 vlan_id,
                              __be16 vlan_id_mask, u8 ip_proto,
                              u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
                              u32 group_id)
{
        u32 priority;
        struct ofdpa_flow_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        priority = OFDPA_PRIORITY_ACL_NORMAL;
        if (eth_dst && eth_dst_mask) {
                if (ether_addr_equal(eth_dst_mask, mcast_mac))
                        priority = OFDPA_PRIORITY_ACL_DFLT;
                else if (is_link_local_ether_addr(eth_dst))
                        priority = OFDPA_PRIORITY_ACL_CTRL;
        }

        entry->key.priority = priority;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
        entry->key.acl.in_pport = in_pport;
        entry->key.acl.in_pport_mask = in_pport_mask;

        if (eth_src)
                ether_addr_copy(entry->key.acl.eth_src, eth_src);
        if (eth_src_mask)
                ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
        if (eth_dst)
                ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
        if (eth_dst_mask)
                ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

        entry->key.acl.eth_type = eth_type;
        entry->key.acl.vlan_id = vlan_id;
        entry->key.acl.vlan_id_mask = vlan_id_mask;
        entry->key.acl.ip_proto = ip_proto;
        entry->key.acl.ip_proto_mask = ip_proto_mask;
        entry->key.acl.ip_tos = ip_tos;
        entry->key.acl.ip_tos_mask = ip_tos_mask;
        entry->key.acl.group_id = group_id;

        return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
                     const struct ofdpa_group_tbl_entry *match)
{
        struct ofdpa_group_tbl_entry *found;

        hash_for_each_possible(ofdpa->group_tbl, found,
                               entry, match->group_id) {
                if (found->group_id == match->group_id)
                        return found;
        }

        return NULL;
}

static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
                                       struct ofdpa_group_tbl_entry *entry)
{
        switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
                ofdpa_kfree(trans, entry->group_ids);
                break;
        default:
                break;
        }
        ofdpa_kfree(trans, entry);
}

static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
                               struct switchdev_trans *trans, int flags,
                               struct ofdpa_group_tbl_entry *match)
{
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_group_tbl_entry *found;
        unsigned long lock_flags;

        spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

        found = ofdpa_group_tbl_find(ofdpa, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                ofdpa_group_tbl_entry_free(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
        } else {
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
        }

        if (!switchdev_trans_ph_prepare(trans))
                hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

        spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

        if (!switchdev_trans_ph_prepare(trans))
                return rocker_cmd_exec(ofdpa_port->rocker_port,
                                       ofdpa_flags_nowait(flags),
                                       ofdpa_cmd_group_tbl_add,
                                       found, NULL, NULL);
        return 0;
}

static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
                               struct switchdev_trans *trans, int flags,
                               struct ofdpa_group_tbl_entry *match)
{
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_group_tbl_entry *found;
        unsigned long lock_flags;
        int err = 0;

        spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

        found = ofdpa_group_tbl_find(ofdpa, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
        }

        spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

        ofdpa_group_tbl_entry_free(trans, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        err = rocker_cmd_exec(ofdpa_port->rocker_port,
                                              ofdpa_flags_nowait(flags),
                                              ofdpa_cmd_group_tbl_del,
                                              found, NULL, NULL);
                ofdpa_group_tbl_entry_free(trans, found);
        }

        return err;
}

static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
                              struct switchdev_trans *trans, int flags,
                              struct ofdpa_group_tbl_entry *entry)
{
        if (flags & OFDPA_OP_FLAG_REMOVE)
                return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
        else
                return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
                                    struct switchdev_trans *trans, int flags,
                                    __be16 vlan_id, u32 out_pport,
                                    int pop_vlan)
{
        struct ofdpa_group_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        entry->l2_interface.pop_vlan = pop_vlan;

        return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
                                  struct switchdev_trans *trans,
                                  int flags, u8 group_count,
                                  const u32 *group_ids, u32 group_id)
{
        struct ofdpa_group_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->group_id = group_id;
        entry->group_count = group_count;

        entry->group_ids = ofdpa_kcalloc(trans, flags,
                                         group_count, sizeof(u32));
        if (!entry->group_ids) {
                ofdpa_kfree(trans, entry);
                return -ENOMEM;
        }
        memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

        return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
                                struct switchdev_trans *trans, int flags,
                                __be16 vlan_id, u8 group_count,
                                const u32 *group_ids, u32 group_id)
{
        return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
                                      group_count, group_ids,
                                      group_id);
}

static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
                                  struct switchdev_trans *trans, int flags,
                                  u32 index, const u8 *src_mac, const u8 *dst_mac,
                                  __be16 vlan_id, bool ttl_check, u32 pport)
{
        struct ofdpa_group_tbl_entry *entry;

        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
        if (src_mac)
                ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
        if (dst_mac)
                ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
        entry->l3_unicast.vlan_id = vlan_id;
        entry->l3_unicast.ttl_check = ttl_check;
        entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

        return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

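/* Neighbour entries are reference counted: the index used for the
 * L3 unicast group is allocated on first add, and the entry is
 * freed when the last user drops its reference.
 */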
1307static struct ofdpa_neigh_tbl_entry *
1308ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1309{
1310        struct ofdpa_neigh_tbl_entry *found;
1311
1312        hash_for_each_possible(ofdpa->neigh_tbl, found,
1313                               entry, be32_to_cpu(ip_addr))
1314                if (found->ip_addr == ip_addr)
1315                        return found;
1316
1317        return NULL;
1318}
1319
1320static void ofdpa_neigh_add(struct ofdpa *ofdpa,
1321                            struct switchdev_trans *trans,
1322                            struct ofdpa_neigh_tbl_entry *entry)
1323{
1324        if (!switchdev_trans_ph_commit(trans))
1325                entry->index = ofdpa->neigh_tbl_next_index++;
1326        if (switchdev_trans_ph_prepare(trans))
1327                return;
1328        entry->ref_count++;
1329        hash_add(ofdpa->neigh_tbl, &entry->entry,
1330                 be32_to_cpu(entry->ip_addr));
1331}
1332
1333static void ofdpa_neigh_del(struct switchdev_trans *trans,
1334                            struct ofdpa_neigh_tbl_entry *entry)
1335{
1336        if (switchdev_trans_ph_prepare(trans))
1337                return;
1338        if (--entry->ref_count == 0) {
1339                hash_del(&entry->entry);
1340                ofdpa_kfree(trans, entry);
1341        }
1342}
1343
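/* Refresh an existing neighbor entry: a new eth_dst replaces the cached
 * MAC and TTL-check flag, while a NULL eth_dst means the entry is being
 * reused as a nexthop, so just take another reference (skipped during
 * the prepare phase).
 */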
1344static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1345                               struct switchdev_trans *trans,
1346                               const u8 *eth_dst, bool ttl_check)
1347{
1348        if (eth_dst) {
1349                ether_addr_copy(entry->eth_dst, eth_dst);
1350                entry->ttl_check = ttl_check;
1351        } else if (!switchdev_trans_ph_prepare(trans)) {
1352                entry->ref_count++;
1353        }
1354}
1355
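/* Add, update or remove the neighbor entry for ip_addr and (re)program
 * the matching L3 unicast group and /32 route.  A scratch entry is
 * allocated up front so the hardware can be programmed from a snapshot
 * of the entry after neigh_tbl_lock is dropped; the scratch copy is
 * freed at the end unless it was inserted into the table (adding).
 */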
1356static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
1357                                 struct switchdev_trans *trans,
1358                                 int flags, __be32 ip_addr, const u8 *eth_dst)
1359{
1360        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1361        struct ofdpa_neigh_tbl_entry *entry;
1362        struct ofdpa_neigh_tbl_entry *found;
1363        unsigned long lock_flags;
1364        __be16 eth_type = htons(ETH_P_IP);
1365        enum rocker_of_dpa_table_id goto_tbl =
1366                        ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1367        u32 group_id;
1368        u32 priority = 0;
1369        bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
1370        bool updating;
1371        bool removing;
1372        int err = 0;
1373
1374        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1375        if (!entry)
1376                return -ENOMEM;
1377
1378        spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
1379
1380        found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
1381
1382        updating = found && adding;
1383        removing = found && !adding;
1384        adding = !found && adding;
1385
1386        if (adding) {
1387                entry->ip_addr = ip_addr;
1388                entry->dev = ofdpa_port->dev;
1389                ether_addr_copy(entry->eth_dst, eth_dst);
1390                entry->ttl_check = true;
1391                ofdpa_neigh_add(ofdpa, trans, entry);
1392        } else if (removing) {
1393                memcpy(entry, found, sizeof(*entry));
1394                ofdpa_neigh_del(trans, found);
1395        } else if (updating) {
1396                ofdpa_neigh_update(found, trans, eth_dst, true);
1397                memcpy(entry, found, sizeof(*entry));
1398        } else {
1399                err = -ENOENT;
1400        }
1401
1402        spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
1403
1404        if (err)
1405                goto err_out;
1406
1407        /* For each active neighbor, we have an L3 unicast group and
1408         * a /32 route to the neighbor, which uses the L3 unicast
1409         * group.  The L3 unicast group can also be referred to by
1410         * other routes' nexthops.
1411         */
1412
1413        err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
1414                                     entry->index,
1415                                     ofdpa_port->dev->dev_addr,
1416                                     entry->eth_dst,
1417                                     ofdpa_port->internal_vlan_id,
1418                                     entry->ttl_check,
1419                                     ofdpa_port->pport);
1420        if (err) {
1421                netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
1422                           err, entry->index);
1423                goto err_out;
1424        }
1425
1426        if (adding || removing) {
1427                group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
1428                err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
1429                                                    eth_type, ip_addr,
1430                                                    inet_make_mask(32),
1431                                                    priority, goto_tbl,
1432                                                    group_id, NULL, flags);
1433
1434                if (err)
1435                        netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
1436                                   err, &entry->ip_addr, group_id);
1437        }
1438
1439err_out:
1440        if (!adding)
1441                ofdpa_kfree(trans, entry);
1442
1443        return err;
1444}
1445
1446static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
1447                                   struct switchdev_trans *trans,
1448                                   __be32 ip_addr)
1449{
1450        struct net_device *dev = ofdpa_port->dev;
1451        struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
1452        int err = 0;
1453
1454        if (!n) {
1455                n = neigh_create(&arp_tbl, &ip_addr, dev);
1456                if (IS_ERR(n))
1457                        return PTR_ERR(n);
1458        }
1459
1460        /* If the neigh is already resolved, then go ahead and
1461         * install the entry, otherwise start the ARP process to
1462         * resolve the neigh.
1463         */
1464
1465        if (n->nud_state & NUD_VALID)
1466                err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
1467                                            ip_addr, n->ha);
1468        else
1469                neigh_event_send(n, NULL);
1470
1471        neigh_release(n);
1472        return err;
1473}
1474
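/* Get the L3 unicast group index to use for nexthop ip_addr.  A new
 * nexthop reserves a neigh table index and kicks off resolution of the
 * neighbor MAC; an existing one just gains a reference.  On remove, the
 * reference is dropped.
 */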
1475static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
1476                              struct switchdev_trans *trans, int flags,
1477                              __be32 ip_addr, u32 *index)
1478{
1479        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1480        struct ofdpa_neigh_tbl_entry *entry;
1481        struct ofdpa_neigh_tbl_entry *found;
1482        unsigned long lock_flags;
1483        bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
1484        bool updating;
1485        bool removing;
1486        bool resolved = true;
1487        int err = 0;
1488
1489        entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1490        if (!entry)
1491                return -ENOMEM;
1492
1493        spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
1494
1495        found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
1496        if (found)
1497                *index = found->index;
1498
1499        updating = found && adding;
1500        removing = found && !adding;
1501        adding = !found && adding;
1502
1503        if (adding) {
1504                entry->ip_addr = ip_addr;
1505                entry->dev = ofdpa_port->dev;
1506                ofdpa_neigh_add(ofdpa, trans, entry);
1507                *index = entry->index;
1508                resolved = false;
1509        } else if (removing) {
1510                ofdpa_neigh_del(trans, found);
1511        } else if (updating) {
1512                ofdpa_neigh_update(found, trans, NULL, false);
1513                resolved = !is_zero_ether_addr(found->eth_dst);
1514        } else {
1515                err = -ENOENT;
1516        }
1517
1518        spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
1519
1520        if (!adding)
1521                ofdpa_kfree(trans, entry);
1522
1523        if (err)
1524                return err;
1525
1526        /* Resolved means neigh ip_addr is resolved to neigh mac. */
1527
1528        if (!resolved)
1529                err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
1530
1531        return err;
1532}
1533
1534static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1535                                         int port_index)
1536{
1537        struct rocker_port *rocker_port;
1538
1539        rocker_port = ofdpa->rocker->ports[port_index];
1540        return rocker_port ? rocker_port->wpriv : NULL;
1541}
1542
1543static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
1544                                       struct switchdev_trans *trans,
1545                                       int flags, __be16 vlan_id)
1546{
1547        struct ofdpa_port *p;
1548        const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1549        unsigned int port_count = ofdpa->rocker->port_count;
1550        u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1551        u32 *group_ids;
1552        u8 group_count = 0;
1553        int err = 0;
1554        int i;
1555
1556        group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
1557        if (!group_ids)
1558                return -ENOMEM;
1559
1560        /* Adjust the flood group for this VLAN.  The flood group
1561         * references an L2 interface group for each port in this
1562         * VLAN.
1563         */
1564
1565        for (i = 0; i < port_count; i++) {
1566                p = ofdpa_port_get(ofdpa, i);
1567                if (!p)
1568                        continue;
1569                if (!ofdpa_port_is_bridged(p))
1570                        continue;
1571                if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
1572                        group_ids[group_count++] =
1573                                ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
1574                }
1575        }
1576
1577        /* If there are no bridged ports in this VLAN, we're done */
1578        if (group_count == 0)
1579                goto no_ports_in_vlan;
1580
1581        err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
1582                                   group_count, group_ids, group_id);
1583        if (err)
1584                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1585
1586no_ports_in_vlan:
1587        ofdpa_kfree(trans, group_ids);
1588        return err;
1589}
1590
1591static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
1592                                     struct switchdev_trans *trans, int flags,
1593                                     __be16 vlan_id, bool pop_vlan)
1594{
1595        const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1596        unsigned int port_count = ofdpa->rocker->port_count;
1597        struct ofdpa_port *p;
1598        bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
1599        u32 out_pport;
1600        int ref = 0;
1601        int err;
1602        int i;
1603
1604        /* An L2 interface group for this port in this VLAN, but
1605         * only when port STP state is LEARNING|FORWARDING.
1606         */
1607
1608        if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
1609            ofdpa_port->stp_state == BR_STATE_FORWARDING) {
1610                out_pport = ofdpa_port->pport;
1611                err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
1612                                               vlan_id, out_pport, pop_vlan);
1613                if (err) {
1614                        netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
1615                                   err, out_pport);
1616                        return err;
1617                }
1618        }
1619
1620        /* An L2 interface group for this VLAN to CPU port.
1621         * Add when first port joins this VLAN and destroy when
1622         * last port leaves this VLAN.
1623         */
1624
1625        for (i = 0; i < port_count; i++) {
1626                p = ofdpa_port_get(ofdpa, i);
1627                if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
1628                        ref++;
1629        }
1630
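        /* i.e. only act when the first port joins the VLAN
         * (adding && ref == 1) or the last port leaves it
         * (!adding && ref == 0); otherwise there is nothing to do.
         */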
1631        if ((!adding || ref != 1) && (adding || ref != 0))
1632                return 0;
1633
1634        out_pport = 0;
1635        err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
1636                                       vlan_id, out_pport, pop_vlan);
1637        if (err) {
1638                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
1639                return err;
1640        }
1641
1642        return 0;
1643}
1644
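/* Control traffic classes.  Each entry describes a class of packets that
 * needs special handling and how it is installed per VLAN: as an ACL
 * entry, a bridging (flood) entry, or a termination-MAC entry.  The MAC
 * patterns (ll_mac, zero_mac, ipv4_mcast, ...) are defined earlier.
 */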
1645static struct ofdpa_ctrl {
1646        const u8 *eth_dst;
1647        const u8 *eth_dst_mask;
1648        __be16 eth_type;
1649        bool acl;
1650        bool bridge;
1651        bool term;
1652        bool copy_to_cpu;
1653} ofdpa_ctrls[] = {
1654        [OFDPA_CTRL_LINK_LOCAL_MCAST] = {
1655                /* pass link local multicast pkts up to CPU for filtering */
1656                .eth_dst = ll_mac,
1657                .eth_dst_mask = ll_mask,
1658                .acl = true,
1659        },
1660        [OFDPA_CTRL_LOCAL_ARP] = {
1661                /* pass local ARP pkts up to CPU */
1662                .eth_dst = zero_mac,
1663                .eth_dst_mask = zero_mac,
1664                .eth_type = htons(ETH_P_ARP),
1665                .acl = true,
1666        },
1667        [OFDPA_CTRL_IPV4_MCAST] = {
1668                /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
1669                .eth_dst = ipv4_mcast,
1670                .eth_dst_mask = ipv4_mask,
1671                .eth_type = htons(ETH_P_IP),
1672                .term  = true,
1673                .copy_to_cpu = true,
1674        },
1675        [OFDPA_CTRL_IPV6_MCAST] = {
1676                /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
1677                .eth_dst = ipv6_mcast,
1678                .eth_dst_mask = ipv6_mask,
1679                .eth_type = htons(ETH_P_IPV6),
1680                .term  = true,
1681                .copy_to_cpu = true,
1682        },
1683        [OFDPA_CTRL_DFLT_BRIDGING] = {
1684                /* flood any pkts on vlan */
1685                .bridge = true,
1686                .copy_to_cpu = true,
1687        },
1688        [OFDPA_CTRL_DFLT_OVS] = {
1689                /* pass all pkts up to CPU */
1690                .eth_dst = zero_mac,
1691                .eth_dst_mask = zero_mac,
1692                .acl = true,
1693        },
1694};
1695
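/* Trap a control class with an ACL entry: match the class's dst MAC and
 * ethertype on this ingress port and VLAN, and send hits to the L2
 * interface group for the CPU port (out_pport 0).
 */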
1696static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
1697                                    struct switchdev_trans *trans, int flags,
1698                                    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1699{
1700        u32 in_pport = ofdpa_port->pport;
1701        u32 in_pport_mask = 0xffffffff;
1702        u32 out_pport = 0;
1703        const u8 *eth_src = NULL;
1704        const u8 *eth_src_mask = NULL;
1705        __be16 vlan_id_mask = htons(0xffff);
1706        u8 ip_proto = 0;
1707        u8 ip_proto_mask = 0;
1708        u8 ip_tos = 0;
1709        u8 ip_tos_mask = 0;
1710        u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1711        int err;
1712
1713        err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
1714                                 in_pport, in_pport_mask,
1715                                 eth_src, eth_src_mask,
1716                                 ctrl->eth_dst, ctrl->eth_dst_mask,
1717                                 ctrl->eth_type,
1718                                 vlan_id, vlan_id_mask,
1719                                 ip_proto, ip_proto_mask,
1720                                 ip_tos, ip_tos_mask,
1721                                 group_id);
1722
1723        if (err)
1724                netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1725
1726        return err;
1727}
1728
1729static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1730                                       struct switchdev_trans *trans,
1731                                       int flags,
1732                                       const struct ofdpa_ctrl *ctrl,
1733                                       __be16 vlan_id)
1734{
1735        enum rocker_of_dpa_table_id goto_tbl =
1736                        ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1737        u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1738        u32 tunnel_id = 0;
1739        int err;
1740
1741        if (!ofdpa_port_is_bridged(ofdpa_port))
1742                return 0;
1743
1744        err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
1745                                    ctrl->eth_dst, ctrl->eth_dst_mask,
1746                                    vlan_id, tunnel_id,
1747                                    goto_tbl, group_id, ctrl->copy_to_cpu);
1748
1749        if (err)
1750                netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1751
1752        return err;
1753}
1754
1755static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
1756                                     struct switchdev_trans *trans, int flags,
1757                                     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1758{
1759        u32 in_pport_mask = 0xffffffff;
1760        __be16 vlan_id_mask = htons(0xffff);
1761        int err;
1762
1763        if (ntohs(vlan_id) == 0)
1764                vlan_id = ofdpa_port->internal_vlan_id;
1765
1766        err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
1767                                      ofdpa_port->pport, in_pport_mask,
1768                                      ctrl->eth_type, ctrl->eth_dst,
1769                                      ctrl->eth_dst_mask, vlan_id,
1770                                      vlan_id_mask, ctrl->copy_to_cpu,
1771                                      flags);
1772
1773        if (err)
1774                netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1775
1776        return err;
1777}
1778
1779static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
1780                                struct switchdev_trans *trans, int flags,
1781                                const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1782{
1783        if (ctrl->acl)
1784                return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
1785                                                ctrl, vlan_id);
1786        if (ctrl->bridge)
1787                return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
1788                                                   ctrl, vlan_id);
1789
1790        if (ctrl->term)
1791                return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
1792                                                 ctrl, vlan_id);
1793
1794        return -EOPNOTSUPP;
1795}
1796
1797static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
1798                                    struct switchdev_trans *trans, int flags,
1799                                    __be16 vlan_id)
1800{
1801        int err = 0;
1802        int i;
1803
1804        for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1805                if (ofdpa_port->ctrls[i]) {
1806                        err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1807                                                   &ofdpa_ctrls[i], vlan_id);
1808                        if (err)
1809                                return err;
1810                }
1811        }
1812
1813        return err;
1814}
1815
1816static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
1817                           struct switchdev_trans *trans, int flags,
1818                           const struct ofdpa_ctrl *ctrl)
1819{
1820        u16 vid;
1821        int err = 0;
1822
1823        for (vid = 1; vid < VLAN_N_VID; vid++) {
1824                if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1825                        continue;
1826                err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1827                                           ctrl, htons(vid));
1828                if (err)
1829                        break;
1830        }
1831
1832        return err;
1833}
1834
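/* Add or remove vid on the port.  The vid is mapped through
 * ofdpa_port_vid_to_vlan() to the VLAN id used internally, the port's
 * vlan_bitmap is toggled, and the control entries, L2 interface groups,
 * flood group and VLAN table entry are updated to match.  During the
 * switchdev prepare phase the bitmap change is undone again at err_out
 * so the commit phase repeats it for real.
 */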
1835static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
1836                           struct switchdev_trans *trans, int flags, u16 vid)
1837{
1838        enum rocker_of_dpa_table_id goto_tbl =
1839                        ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
1840        u32 in_pport = ofdpa_port->pport;
1841        __be16 vlan_id = htons(vid);
1842        __be16 vlan_id_mask = htons(0xffff);
1843        __be16 internal_vlan_id;
1844        bool untagged;
1845        bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
1846        int err;
1847
1848        internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);
1849
1850        if (adding &&
1851            test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
1852                return 0; /* already added */
1853        else if (!adding &&
1854                 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
1855                return 0; /* already removed */
1856
1857        change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
1858
1859        if (adding) {
1860                err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
1861                                               internal_vlan_id);
1862                if (err) {
1863                        netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
1864                        goto err_out;
1865                }
1866        }
1867
1868        err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
1869                                        internal_vlan_id, untagged);
1870        if (err) {
1871                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
1872                goto err_out;
1873        }
1874
1875        err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
1876                                          internal_vlan_id);
1877        if (err) {
1878                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1879                goto err_out;
1880        }
1881
1882        err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
1883                                  in_pport, vlan_id, vlan_id_mask,
1884                                  goto_tbl, untagged, internal_vlan_id);
1885        if (err)
1886                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);
1887
1888err_out:
1889        if (switchdev_trans_ph_prepare(trans))
1890                change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
1891
1892        return err;
1893}
1894
1895static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
1896                             struct switchdev_trans *trans, int flags)
1897{
1898        enum rocker_of_dpa_table_id goto_tbl;
1899        u32 in_pport;
1900        u32 in_pport_mask;
1901        int err;
1902
1903        /* Normal Ethernet Frames.  Matches pkts from any local physical
1904         * ports.  Goto VLAN tbl.
1905         */
1906
1907        in_pport = 0;
1908        in_pport_mask = 0xffff0000;
1909        goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1910
1911        err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
1912                                     in_pport, in_pport_mask,
1913                                     goto_tbl);
1914        if (err)
1915                netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1916
1917        return err;
1918}
1919
1920struct ofdpa_fdb_learn_work {
1921        struct work_struct work;
1922        struct ofdpa_port *ofdpa_port;
1923        struct switchdev_trans *trans;
1924        int flags;
1925        u8 addr[ETH_ALEN];
1926        u16 vid;
1927};
1928
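/* Deferred work: notify the bridge of learned (or removed/aged-out) FDB
 * entries via the switchdev FDB notifiers, under rtnl_lock.
 */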
1929static void ofdpa_port_fdb_learn_work(struct work_struct *work)
1930{
1931        const struct ofdpa_fdb_learn_work *lw =
1932                container_of(work, struct ofdpa_fdb_learn_work, work);
1933        bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
1934        bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
1935        struct switchdev_notifier_fdb_info info;
1936
1937        info.addr = lw->addr;
1938        info.vid = lw->vid;
1939
1940        rtnl_lock();
1941        if (learned && removing)
1942                call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
1943                                         lw->ofdpa_port->dev, &info.info);
1944        else if (learned && !removing)
1945                call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
1946                                         lw->ofdpa_port->dev, &info.info);
1947        rtnl_unlock();
1948
1949        ofdpa_kfree(lw->trans, work);
1950}
1951
1952static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
1953                                struct switchdev_trans *trans, int flags,
1954                                const u8 *addr, __be16 vlan_id)
1955{
1956        struct ofdpa_fdb_learn_work *lw;
1957        enum rocker_of_dpa_table_id goto_tbl =
1958                        ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1959        u32 out_pport = ofdpa_port->pport;
1960        u32 tunnel_id = 0;
1961        u32 group_id = ROCKER_GROUP_NONE;
1962        bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
1963        bool copy_to_cpu = false;
1964        int err;
1965
1966        if (ofdpa_port_is_bridged(ofdpa_port))
1967                group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1968
1969        if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
1970                err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
1971                                            NULL, vlan_id, tunnel_id, goto_tbl,
1972                                            group_id, copy_to_cpu);
1973                if (err)
1974                        return err;
1975        }
1976
1977        if (!syncing)
1978                return 0;
1979
1980        if (!ofdpa_port_is_bridged(ofdpa_port))
1981                return 0;
1982
1983        lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
1984        if (!lw)
1985                return -ENOMEM;
1986
1987        INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
1988
1989        lw->ofdpa_port = ofdpa_port;
1990        lw->trans = trans;
1991        lw->flags = flags;
1992        ether_addr_copy(lw->addr, addr);
1993        lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);
1994
1995        if (switchdev_trans_ph_prepare(trans))
1996                ofdpa_kfree(trans, lw);
1997        else
1998                schedule_work(&lw->work);
1999
2000        return 0;
2001}
2002
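/* Look up an FDB entry by its (port, MAC, VLAN) key; the table is hashed
 * on the key's crc32.  Callers hold ofdpa->fdb_tbl_lock.
 */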
2003static struct ofdpa_fdb_tbl_entry *
2004ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
2005                   const struct ofdpa_fdb_tbl_entry *match)
2006{
2007        struct ofdpa_fdb_tbl_entry *found;
2008
2009        hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
2010                if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2011                        return found;
2012
2013        return NULL;
2014}
2015
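/* Add or remove a (port, MAC, VLAN) entry in the software FDB and push
 * the change to the bridging flow table via ofdpa_port_fdb_learn().
 * Re-adding an entry that already exists becomes a refresh of its aging
 * timer; removing one that was never learned is a no-op.
 */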
2016static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
2017                          struct switchdev_trans *trans,
2018                          const unsigned char *addr,
2019                          __be16 vlan_id, int flags)
2020{
2021        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2022        struct ofdpa_fdb_tbl_entry *fdb;
2023        struct ofdpa_fdb_tbl_entry *found;
2024        bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
2025        unsigned long lock_flags;
2026
2027        fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
2028        if (!fdb)
2029                return -ENOMEM;
2030
2031        fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
2032        fdb->touched = jiffies;
2033        fdb->key.ofdpa_port = ofdpa_port;
2034        ether_addr_copy(fdb->key.addr, addr);
2035        fdb->key.vlan_id = vlan_id;
2036        fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
2037
2038        spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
2039
2040        found = ofdpa_fdb_tbl_find(ofdpa, fdb);
2041
2042        if (found) {
2043                found->touched = jiffies;
2044                if (removing) {
2045                        ofdpa_kfree(trans, fdb);
2046                        if (!switchdev_trans_ph_prepare(trans))
2047                                hash_del(&found->entry);
2048                }
2049        } else if (!removing) {
2050                if (!switchdev_trans_ph_prepare(trans))
2051                        hash_add(ofdpa->fdb_tbl, &fdb->entry,
2052                                 fdb->key_crc32);
2053        }
2054
2055        spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
2056
2057        /* Check if adding and already exists, or removing and can't find */
2058        if (!found != !removing) {
2059                ofdpa_kfree(trans, fdb);
2060                if (!found && removing)
2061                        return 0;
2062                /* Refreshing existing to update aging timers */
2063                flags |= OFDPA_OP_FLAG_REFRESH;
2064        }
2065
2066        return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
2067}
2068
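/* Remove this port's learned entries from the FDB and bridging table.
 * Nothing is flushed while the port is still in the LEARNING or
 * FORWARDING STP state.
 */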
2069static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
2070                                struct switchdev_trans *trans, int flags)
2071{
2072        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2073        struct ofdpa_fdb_tbl_entry *found;
2074        unsigned long lock_flags;
2075        struct hlist_node *tmp;
2076        int bkt;
2077        int err = 0;
2078
2079        if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
2080            ofdpa_port->stp_state == BR_STATE_FORWARDING)
2081                return 0;
2082
2083        flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;
2084
2085        spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
2086
2087        hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
2088                if (found->key.ofdpa_port != ofdpa_port)
2089                        continue;
2090                if (!found->learned)
2091                        continue;
2092                err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
2093                                           found->key.addr,
2094                                           found->key.vlan_id);
2095                if (err)
2096                        goto err_out;
2097                if (!switchdev_trans_ph_prepare(trans))
2098                        hash_del(&found->entry);
2099        }
2100
2101err_out:
2102        spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
2103
2104        return err;
2105}
2106
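/* FDB ageing timer: expire learned entries whose last-touched time plus
 * the owning port's ageing_time has passed, tell the bridge they are
 * gone, and re-arm the timer for the earliest remaining expiry.
 */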
2107static void ofdpa_fdb_cleanup(unsigned long data)
2108{
2109        struct ofdpa *ofdpa = (struct ofdpa *)data;
2110        struct ofdpa_port *ofdpa_port;
2111        struct ofdpa_fdb_tbl_entry *entry;
2112        struct hlist_node *tmp;
2113        unsigned long next_timer = jiffies + ofdpa->ageing_time;
2114        unsigned long expires;
2115        unsigned long lock_flags;
2116        int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
2117                    OFDPA_OP_FLAG_LEARNED;
2118        int bkt;
2119
2120        spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
2121
2122        hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
2123                if (!entry->learned)
2124                        continue;
2125                ofdpa_port = entry->key.ofdpa_port;
2126                expires = entry->touched + ofdpa_port->ageing_time;
2127                if (time_before_eq(expires, jiffies)) {
2128                        ofdpa_port_fdb_learn(ofdpa_port, NULL,
2129                                             flags, entry->key.addr,
2130                                             entry->key.vlan_id);
2131                        hash_del(&entry->entry);
2132                } else if (time_before(expires, next_timer)) {
2133                        next_timer = expires;
2134                }
2135        }
2136
2137        spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
2138
2139        mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
2140}
2141
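/* Install (or remove) termination-MAC entries matching the port's own
 * MAC address for both ETH_P_IP and ETH_P_IPV6 on the given VLAN (the
 * port's internal VLAN when vid 0 is passed in).
 */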
2142static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2143                                 struct switchdev_trans *trans, int flags,
2144                                 __be16 vlan_id)
2145{
2146        u32 in_pport_mask = 0xffffffff;
2147        __be16 eth_type;
2148        const u8 *dst_mac_mask = ff_mac;
2149        __be16 vlan_id_mask = htons(0xffff);
2150        bool copy_to_cpu = false;
2151        int err;
2152
2153        if (ntohs(vlan_id) == 0)
2154                vlan_id = ofdpa_port->internal_vlan_id;
2155
2156        eth_type = htons(ETH_P_IP);
2157        err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2158                                      ofdpa_port->pport, in_pport_mask,
2159                                      eth_type, ofdpa_port->dev->dev_addr,
2160                                      dst_mac_mask, vlan_id, vlan_id_mask,
2161                                      copy_to_cpu, flags);
2162        if (err)
2163                return err;
2164
2165        eth_type = htons(ETH_P_IPV6);
2166        err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2167                                      ofdpa_port->pport, in_pport_mask,
2168                                      eth_type, ofdpa_port->dev->dev_addr,
2169                                      dst_mac_mask, vlan_id, vlan_id_mask,
2170                                      copy_to_cpu, flags);
2171
2172        return err;
2173}
2174
2175static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
2176                             struct switchdev_trans *trans, int flags)
2177{
2178        bool pop_vlan;
2179        u32 out_pport;
2180        __be16 vlan_id;
2181        u16 vid;
2182        int err;
2183
2184        /* Port will be forwarding-enabled if its STP state is LEARNING
2185         * or FORWARDING.  Traffic from CPU can still egress, regardless of
2186         * port STP state.  Use L2 interface group on port VLANs as a way
2187         * to toggle port forwarding: if forwarding is disabled, L2
2188         * interface group will not exist.
2189         */
2190
2191        if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2192            ofdpa_port->stp_state != BR_STATE_FORWARDING)
2193                flags |= OFDPA_OP_FLAG_REMOVE;
2194
2195        out_pport = ofdpa_port->pport;
2196        for (vid = 1; vid < VLAN_N_VID; vid++) {
2197                if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2198                        continue;
2199                vlan_id = htons(vid);
2200                pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2201                err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
2202                                               vlan_id, out_pport, pop_vlan);
2203                if (err) {
2204                        netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2205                                   err, out_pport);
2206                        return err;
2207                }
2208        }
2209
2210        return 0;
2211}
2212
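/* Apply an STP state change: work out which control-traffic classes the
 * new state wants, install/remove the delta, flush learned FDB entries
 * when the port stops forwarding, and update port forwarding.  During
 * the switchdev prepare phase the previous ctrls and state are restored
 * at the end so the commit phase starts from the original state.
 */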
2213static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
2214                                 struct switchdev_trans *trans,
2215                                 int flags, u8 state)
2216{
2217        bool want[OFDPA_CTRL_MAX] = { 0, };
2218        bool prev_ctrls[OFDPA_CTRL_MAX];
2219        u8 uninitialized_var(prev_state);
2220        int err;
2221        int i;
2222
2223        if (switchdev_trans_ph_prepare(trans)) {
2224                memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
2225                prev_state = ofdpa_port->stp_state;
2226        }
2227
2228        if (ofdpa_port->stp_state == state)
2229                return 0;
2230
2231        ofdpa_port->stp_state = state;
2232
2233        switch (state) {
2234        case BR_STATE_DISABLED:
2235                /* port is completely disabled */
2236                break;
2237        case BR_STATE_LISTENING:
2238        case BR_STATE_BLOCKING:
2239                want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
2240                break;
2241        case BR_STATE_LEARNING:
2242        case BR_STATE_FORWARDING:
2243                if (!ofdpa_port_is_ovsed(ofdpa_port))
2244                        want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
2245                want[OFDPA_CTRL_IPV4_MCAST] = true;
2246                want[OFDPA_CTRL_IPV6_MCAST] = true;
2247                if (ofdpa_port_is_bridged(ofdpa_port))
2248                        want[OFDPA_CTRL_DFLT_BRIDGING] = true;
2249                else if (ofdpa_port_is_ovsed(ofdpa_port))
2250                        want[OFDPA_CTRL_DFLT_OVS] = true;
2251                else
2252                        want[OFDPA_CTRL_LOCAL_ARP] = true;
2253                break;
2254        }
2255
2256        for (i = 0; i < OFDPA_CTRL_MAX; i++) {
2257                if (want[i] != ofdpa_port->ctrls[i]) {
2258                        int ctrl_flags = flags |
2259                                         (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
2260                        err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
2261                                              &ofdpa_ctrls[i]);
2262                        if (err)
2263                                goto err_out;
2264                        ofdpa_port->ctrls[i] = want[i];
2265                }
2266        }
2267
2268        err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
2269        if (err)
2270                goto err_out;
2271
2272        err = ofdpa_port_fwding(ofdpa_port, trans, flags);
2273
2274err_out:
2275        if (switchdev_trans_ph_prepare(trans)) {
2276                memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
2277                ofdpa_port->stp_state = prev_state;
2278        }
2279
2280        return err;
2281}
2282
2283static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2284{
2285        if (ofdpa_port_is_bridged(ofdpa_port))
2286                /* bridge STP will enable port */
2287                return 0;
2288
2289        /* port is not bridged, so simulate going to FORWARDING state */
2290        return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2291                                     BR_STATE_FORWARDING);
2292}
2293
2294static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2295{
2296        if (ofdpa_port_is_bridged(ofdpa_port))
2297                /* bridge STP will disable port */
2298                return 0;
2299
2300        /* port is not bridged, so simulate going to DISABLED state */
2301        return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2302                                     BR_STATE_DISABLED);
2303}
2304
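/* Add a VLAN to the port: program the VLAN itself and then the
 * termination-MAC (router MAC) entries for it, rolling the VLAN back if
 * the router MAC step fails.
 */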
2305static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2306                               struct switchdev_trans *trans,
2307                               u16 vid, u16 flags)
2308{
2309        int err;
2310
2311        /* XXX deal with flags for PVID and untagged */
2312
2313        err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
2314        if (err)
2315                return err;
2316
2317        err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
2318        if (err)
2319                ofdpa_port_vlan(ofdpa_port, trans,
2320                                OFDPA_OP_FLAG_REMOVE, vid);
2321
2322        return err;
2323}
2324
2325static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2326                               u16 vid, u16 flags)
2327{
2328        int err;
2329
2330        err = ofdpa_port_router_mac(ofdpa_port, NULL,
2331                                    OFDPA_OP_FLAG_REMOVE, htons(vid));
2332        if (err)
2333                return err;
2334
2335        return ofdpa_port_vlan(ofdpa_port, NULL,
2336                               OFDPA_OP_FLAG_REMOVE, vid);
2337}
2338
2339static struct ofdpa_internal_vlan_tbl_entry *
2340ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2341{
2342        struct ofdpa_internal_vlan_tbl_entry *found;
2343
2344        hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2345                               entry, ifindex) {
2346                if (found->ifindex == ifindex)
2347                        return found;
2348        }
2349
2350        return NULL;
2351}
2352
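/* Get (allocating on first use) the internal VLAN id associated with an
 * ifindex.  Internal VLAN ids are taken from a bitmap starting at
 * OFDPA_INTERNAL_VLAN_ID_BASE and are reference counted per ifindex;
 * 0 is returned on failure.
 */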
2353static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
2354                                              int ifindex)
2355{
2356        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2357        struct ofdpa_internal_vlan_tbl_entry *entry;
2358        struct ofdpa_internal_vlan_tbl_entry *found;
2359        unsigned long lock_flags;
2360        int i;
2361
2362        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2363        if (!entry)
2364                return 0;
2365
2366        entry->ifindex = ifindex;
2367
2368        spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2369
2370        found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
2371        if (found) {
2372                kfree(entry);
2373                goto found;
2374        }
2375
2376        found = entry;
2377        hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);
2378
2379        for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
2380                if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
2381                        continue;
2382                found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
2383                goto found;
2384        }
2385
2386        netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");
2387
2388found:
2389        found->ref_count++;
2390        spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2391
2392        return found->vlan_id;
2393}
2394
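/* Install (or remove) a unicast IPv4 route.  When the route's nexthop is
 * a gateway reachable through this port, the flow entry points at that
 * nexthop's L3 unicast group; otherwise it points at the L2 interface
 * group for the port's internal VLAN and port 0, so packets go to the
 * CPU for processing.
 */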
2395static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
2396                               struct switchdev_trans *trans, __be32 dst,
2397                               int dst_len, struct fib_info *fi,
2398                               u32 tb_id, int flags)
2399{
2400        const struct fib_nh *nh;
2401        __be16 eth_type = htons(ETH_P_IP);
2402        __be32 dst_mask = inet_make_mask(dst_len);
2403        __be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
2404        u32 priority = fi->fib_priority;
2405        enum rocker_of_dpa_table_id goto_tbl =
2406                ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2407        u32 group_id;
2408        bool nh_on_port;
2409        bool has_gw;
2410        u32 index;
2411        int err;
2412
2413        /* XXX support ECMP */
2414
2415        nh = fi->fib_nh;
2416        nh_on_port = (fi->fib_dev == ofdpa_port->dev);
2417        has_gw = !!nh->nh_gw;
2418
2419        if (has_gw && nh_on_port) {
2420                err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
2421                                         nh->nh_gw, &index);
2422                if (err)
2423                        return err;
2424
2425                group_id = ROCKER_GROUP_L3_UNICAST(index);
2426        } else {
2427                /* Send to CPU for processing */
2428                group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
2429        }
2430
2431        err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
2432                                            dst_mask, priority, goto_tbl,
2433                                            group_id, fi, flags);
2434        if (err)
2435                netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
2436                           err, &dst);
2437
2438        return err;
2439}
2440
2441static void
2442ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
2443                                int ifindex)
2444{
2445        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2446        struct ofdpa_internal_vlan_tbl_entry *found;
2447        unsigned long lock_flags;
2448        unsigned long bit;
2449
2450        spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2451
2452        found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
2453        if (!found) {
2454                netdev_err(ofdpa_port->dev,
2455                           "ifindex (%d) not found in internal VLAN tbl\n",
2456                           ifindex);
2457                goto not_found;
2458        }
2459
2460        if (--found->ref_count <= 0) {
2461                bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
2462                clear_bit(bit, ofdpa->internal_vlan_bitmap);
2463                hash_del(&found->entry);
2464                kfree(found);
2465        }
2466
2467not_found:
2468        spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2469}
2470
2471/**********************************
2472 * Rocker world ops implementation
2473 **********************************/
2474
2475static int ofdpa_init(struct rocker *rocker)
2476{
2477        struct ofdpa *ofdpa = rocker->wpriv;
2478
2479        ofdpa->rocker = rocker;
2480
2481        hash_init(ofdpa->flow_tbl);
2482        spin_lock_init(&ofdpa->flow_tbl_lock);
2483
2484        hash_init(ofdpa->group_tbl);
2485        spin_lock_init(&ofdpa->group_tbl_lock);
2486
2487        hash_init(ofdpa->fdb_tbl);
2488        spin_lock_init(&ofdpa->fdb_tbl_lock);
2489
2490        hash_init(ofdpa->internal_vlan_tbl);
2491        spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2492
2493        hash_init(ofdpa->neigh_tbl);
2494        spin_lock_init(&ofdpa->neigh_tbl_lock);
2495
2496        setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
2497                    (unsigned long) ofdpa);
2498        mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2499
2500        ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2501
2502        return 0;
2503}
2504
2505static void ofdpa_fini(struct rocker *rocker)
2506{
2507        struct ofdpa *ofdpa = rocker->wpriv;
2508
2509        unsigned long flags;
2510        struct ofdpa_flow_tbl_entry *flow_entry;
2511        struct ofdpa_group_tbl_entry *group_entry;
2512        struct ofdpa_fdb_tbl_entry *fdb_entry;
2513        struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
2514        struct ofdpa_neigh_tbl_entry *neigh_entry;
2515        struct hlist_node *tmp;
2516        int bkt;
2517
2518        del_timer_sync(&ofdpa->fdb_cleanup_timer);
2519        flush_workqueue(rocker->rocker_owq);
2520
2521        spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
2522        hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
2523                hash_del(&flow_entry->entry);
2524        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
2525
2526        spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
2527        hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
2528                hash_del(&group_entry->entry);
2529        spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);
2530
2531        spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
2532        hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
2533                hash_del(&fdb_entry->entry);
2534        spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);
2535
2536        spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
2537        hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
2538                           tmp, internal_vlan_entry, entry)
2539                hash_del(&internal_vlan_entry->entry);
2540        spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);
2541
2542        spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
2543        hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
2544                hash_del(&neigh_entry->entry);
2545        spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
2546}
2547
2548static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2549{
2550        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2551
2552        ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2553        ofdpa_port->rocker_port = rocker_port;
2554        ofdpa_port->dev = rocker_port->dev;
2555        ofdpa_port->pport = rocker_port->pport;
2556        ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
2557        ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2558        return 0;
2559}
2560
2561static int ofdpa_port_init(struct rocker_port *rocker_port)
2562{
2563        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2564        int err;
2565
2566        rocker_port_set_learning(rocker_port,
2567                                 !!(ofdpa_port->brport_flags & BR_LEARNING));
2568
2569        err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
2570        if (err) {
2571                netdev_err(ofdpa_port->dev, "install ig port table failed\n");
2572                return err;
2573        }
2574
2575        ofdpa_port->internal_vlan_id =
2576                ofdpa_port_internal_vlan_id_get(ofdpa_port,
2577                                                ofdpa_port->dev->ifindex);
2578
2579        err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
2580        if (err) {
2581                netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
2582                goto err_untagged_vlan;
2583        }
2584        return 0;
2585
2586err_untagged_vlan:
2587        ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
2588        return err;
2589}
2590
2591static void ofdpa_port_fini(struct rocker_port *rocker_port)
2592{
2593        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2594
2595        ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
2596}
2597
2598static int ofdpa_port_open(struct rocker_port *rocker_port)
2599{
2600        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2601
2602        return ofdpa_port_fwd_enable(ofdpa_port, 0);
2603}
2604
2605static void ofdpa_port_stop(struct rocker_port *rocker_port)
2606{
2607        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2608
2609        ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2610}
2611
2612static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2613                                         u8 state,
2614                                         struct switchdev_trans *trans)
2615{
2616        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2617
2618        return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
2619}
2620
2621static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
2622                                            unsigned long brport_flags,
2623                                            struct switchdev_trans *trans)
2624{
2625        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2626        unsigned long orig_flags;
2627        int err = 0;
2628
2629        orig_flags = ofdpa_port->brport_flags;
2630        ofdpa_port->brport_flags = brport_flags;
2631        if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
2632            !switchdev_trans_ph_prepare(trans))
2633                err = rocker_port_set_learning(ofdpa_port->rocker_port,
2634                                               !!(ofdpa_port->brport_flags & BR_LEARNING));
2635
2636        if (switchdev_trans_ph_prepare(trans))
2637                ofdpa_port->brport_flags = orig_flags;
2638
2639        return err;
2640}
2641
2642static int
2643ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
2644                                 unsigned long *p_brport_flags)
2645{
2646        const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2647
2648        *p_brport_flags = ofdpa_port->brport_flags;
2649        return 0;
2650}
2651
2652static int
2653ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2654                                       u32 ageing_time,
2655                                       struct switchdev_trans *trans)
2656{
2657        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2658        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2659
2660        if (!switchdev_trans_ph_prepare(trans)) {
2661                ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2662                if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2663                        ofdpa->ageing_time = ofdpa_port->ageing_time;
2664                mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2665        }
2666
2667        return 0;
2668}
2669
2670static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2671                                   const struct switchdev_obj_port_vlan *vlan,
2672                                   struct switchdev_trans *trans)
2673{
2674        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2675        u16 vid;
2676        int err;
2677
2678        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2679                err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
2680                if (err)
2681                        return err;
2682        }
2683
2684        return 0;
2685}
2686
2687static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2688                                   const struct switchdev_obj_port_vlan *vlan)
2689{
2690        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2691        u16 vid;
2692        int err;
2693
2694        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2695                err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2696                if (err)
2697                        return err;
2698        }
2699
2700        return 0;
2701}
2702
2703static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
2704                                    struct switchdev_obj_port_vlan *vlan,
2705                                    switchdev_obj_dump_cb_t *cb)
2706{
2707        const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2708        u16 vid;
2709        int err = 0;
2710
2711        for (vid = 1; vid < VLAN_N_VID; vid++) {
2712                if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2713                        continue;
2714                vlan->flags = 0;
2715                if (ofdpa_vlan_id_is_internal(htons(vid)))
2716                        vlan->flags |= BRIDGE_VLAN_INFO_PVID;
2717                vlan->vid_begin = vlan->vid_end = vid;
2718                err = cb(&vlan->obj);
2719                if (err)
2720                        break;
2721        }
2722
2723        return err;
2724}
2725
2726static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2727                                  const struct switchdev_obj_port_fdb *fdb,
2728                                  struct switchdev_trans *trans)
2729{
2730        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2731        __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2732
2733        if (!ofdpa_port_is_bridged(ofdpa_port))
2734                return -EINVAL;
2735
2736        return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
2737}
2738
2739static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2740                                  const struct switchdev_obj_port_fdb *fdb)
2741{
2742        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2743        __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2744        int flags = OFDPA_OP_FLAG_REMOVE;
2745
2746        if (!ofdpa_port_is_bridged(ofdpa_port))
2747                return -EINVAL;
2748
2749        return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
2750}
2751
2752static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
2753                                   struct switchdev_obj_port_fdb *fdb,
2754                                   switchdev_obj_dump_cb_t *cb)
2755{
2756        const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2757        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2758        struct ofdpa_fdb_tbl_entry *found;
2759        struct hlist_node *tmp;
2760        unsigned long lock_flags;
2761        int bkt;
2762        int err = 0;
2763
2764        spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
2765        hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
2766                if (found->key.ofdpa_port != ofdpa_port)
2767                        continue;
2768                ether_addr_copy(fdb->addr, found->key.addr);
2769                fdb->ndm_state = NUD_REACHABLE;
2770                fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
2771                                                  found->key.vlan_id);
2772                err = cb(&fdb->obj);
2773                if (err)
2774                        break;
2775        }
2776        spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
2777
2778        return err;
2779}
2780
2781static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
2782                                  struct net_device *bridge)
2783{
2784        int err;
2785
2786        /* Port is joining bridge, so the internal VLAN for the
2787         * port is going to change to the bridge internal VLAN.
2788         * Let's remove untagged VLAN (vid=0) from port and
2789         * re-add once internal VLAN has changed.
2790         */
2791
2792        err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2793        if (err)
2794                return err;
2795
2796        ofdpa_port_internal_vlan_id_put(ofdpa_port,
2797                                        ofdpa_port->dev->ifindex);
2798        ofdpa_port->internal_vlan_id =
2799                ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);
2800
2801        ofdpa_port->bridge_dev = bridge;
2802
2803        return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
2804}
2805
2806static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
2807{
2808        int err;
2809
2810        err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2811        if (err)
2812                return err;
2813
2814        ofdpa_port_internal_vlan_id_put(ofdpa_port,
2815                                        ofdpa_port->bridge_dev->ifindex);
2816        ofdpa_port->internal_vlan_id =
2817                ofdpa_port_internal_vlan_id_get(ofdpa_port,
2818                                                ofdpa_port->dev->ifindex);
2819
2820        ofdpa_port->bridge_dev = NULL;
2821
2822        err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
2823        if (err)
2824                return err;
2825
2826        if (ofdpa_port->dev->flags & IFF_UP)
2827                err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2828
2829        return err;
2830}
2831
2832static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2833                                  struct net_device *master)
2834{
2835        int err;
2836
2837        ofdpa_port->bridge_dev = master;
2838
2839        err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2840        if (err)
2841                return err;
2842        err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2843
2844        return err;
2845}
2846
2847static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2848                                    struct net_device *master)
2849{
2850        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2851        int err = 0;
2852
2853        if (netif_is_bridge_master(master))
2854                err = ofdpa_port_bridge_join(ofdpa_port, master);
2855        else if (netif_is_ovs_master(master))
2856                err = ofdpa_port_ovs_changed(ofdpa_port, master);
2857        return err;
2858}
2859
2860static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2861                                      struct net_device *master)
2862{
2863        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2864        int err = 0;
2865
2866        if (ofdpa_port_is_bridged(ofdpa_port))
2867                err = ofdpa_port_bridge_leave(ofdpa_port);
2868        else if (ofdpa_port_is_ovsed(ofdpa_port))
2869                err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2870        return err;
2871}
2872
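/* Illustrative only: a hedged sketch of how a NETDEV_CHANGEUPPER handler
 * could route master link/unlink events into the two functions above.
 * The real dispatch lives in the rocker core; example_changeupper() and
 * the assumption that the caller already resolved the rocker_port are
 * for illustration.
 */
static int example_changeupper(struct rocker_port *rocker_port,
                               struct netdev_notifier_changeupper_info *info)
{
        if (!netif_is_bridge_master(info->upper_dev) &&
            !netif_is_ovs_master(info->upper_dev))
                return 0;

        if (info->linking)
                return ofdpa_port_master_linked(rocker_port, info->upper_dev);

        return ofdpa_port_master_unlinked(rocker_port, info->upper_dev);
}
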
2873static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2874                                   struct neighbour *n)
2875{
2876        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2877        int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2878                                                    OFDPA_OP_FLAG_NOWAIT;
2879        __be32 ip_addr = *(__be32 *) n->primary_key;
2880
2881        return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2882}
2883
2884static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2885                                    struct neighbour *n)
2886{
2887        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2888        int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2889        __be32 ip_addr = *(__be32 *) n->primary_key;
2890
2891        return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2892}
2893
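/* Illustrative only: a hedged sketch of a netevent hook feeding ARP
 * neighbour updates into ofdpa_port_neigh_update().  It assumes
 * <net/netevent.h> for NETEVENT_NEIGH_UPDATE; example_port_from_netdev()
 * is a hypothetical lookup helper, not a rocker API.
 */
static int example_neigh_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct neighbour *n = ptr;
        struct rocker_port *rocker_port;

        if (event != NETEVENT_NEIGH_UPDATE || n->tbl != &arp_tbl)
                return NOTIFY_DONE;

        rocker_port = example_port_from_netdev(n->dev);     /* hypothetical */
        if (rocker_port && ofdpa_port_neigh_update(rocker_port, n))
                netdev_warn(n->dev, "failed to handle neigh update\n");

        return NOTIFY_DONE;
}
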
2894static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2895                                       const unsigned char *addr,
2896                                       __be16 vlan_id)
2897{
2898        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2899        int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2900
2901        if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2902            ofdpa_port->stp_state != BR_STATE_FORWARDING)
2903                return 0;
2904
2905        return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
2906}
2907
2908static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2909                                                    struct rocker *rocker)
2910{
2911        struct rocker_port *rocker_port;
2912
2913        rocker_port = rocker_port_dev_lower_find(dev, rocker);
2914        return rocker_port ? rocker_port->wpriv : NULL;
2915}
2916
2917static int ofdpa_fib4_add(struct rocker *rocker,
2918                          const struct fib_entry_notifier_info *fen_info)
2919{
2920        struct ofdpa *ofdpa = rocker->wpriv;
2921        struct ofdpa_port *ofdpa_port;
2922        int err;
2923
2924        if (ofdpa->fib_aborted)
2925                return 0;
2926        ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
2927        if (!ofdpa_port)
2928                return 0;
2929        err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
2930                                  fen_info->dst_len, fen_info->fi,
2931                                  fen_info->tb_id, 0);
2932        if (err)
2933                return err;
2934        fib_info_offload_inc(fen_info->fi);
2935        return 0;
2936}
2937
2938static int ofdpa_fib4_del(struct rocker *rocker,
2939                          const struct fib_entry_notifier_info *fen_info)
2940{
2941        struct ofdpa *ofdpa = rocker->wpriv;
2942        struct ofdpa_port *ofdpa_port;
2943
2944        if (ofdpa->fib_aborted)
2945                return 0;
2946        ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
2947        if (!ofdpa_port)
2948                return 0;
2949        fib_info_offload_dec(fen_info->fi);
2950        return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
2951                                   fen_info->dst_len, fen_info->fi,
2952                                   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
2953}
2954
2955static void ofdpa_fib4_abort(struct rocker *rocker)
2956{
2957        struct ofdpa *ofdpa = rocker->wpriv;
2958        struct ofdpa_port *ofdpa_port;
2959        struct ofdpa_flow_tbl_entry *flow_entry;
2960        struct hlist_node *tmp;
2961        unsigned long flags;
2962        int bkt;
2963
2964        if (ofdpa->fib_aborted)
2965                return;
2966
2967        spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
2968        hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
2969                if (flow_entry->key.tbl_id !=
2970                    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
2971                        continue;
2972                ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
2973                                                       rocker);
2974                if (!ofdpa_port)
2975                        continue;
2976                fib_info_offload_dec(flow_entry->fi);
2977                ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
2978                                   flow_entry);
2979        }
2980        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
2981        ofdpa->fib_aborted = true;
2982}
2983
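/* Illustrative only: a hedged sketch of how a FIB notifier might dispatch
 * route events to the three fib4_* handlers above.  Registration and the
 * real dispatch live in the rocker core; example_fib_event() and
 * example_rocker_from_nb() are hypothetical names.
 */
static int example_fib_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
{
        struct fib_entry_notifier_info *fen_info = ptr;
        struct rocker *rocker = example_rocker_from_nb(nb);  /* hypothetical */
        int err = 0;

        switch (event) {
        case FIB_EVENT_ENTRY_ADD:
                err = ofdpa_fib4_add(rocker, fen_info);
                if (err)
                        ofdpa_fib4_abort(rocker); /* give up offloading routes */
                break;
        case FIB_EVENT_ENTRY_DEL:
                ofdpa_fib4_del(rocker, fen_info);
                break;
        case FIB_EVENT_RULE_ADD:
        case FIB_EVENT_RULE_DEL:
                /* policy rules cannot be offloaded; stop offloading */
                ofdpa_fib4_abort(rocker);
                break;
        }
        return notifier_from_errno(err);
}
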
2984struct rocker_world_ops rocker_ofdpa_ops = {
2985        .kind = "ofdpa",
2986        .priv_size = sizeof(struct ofdpa),
2987        .port_priv_size = sizeof(struct ofdpa_port),
2988        .mode = ROCKER_PORT_MODE_OF_DPA,
2989        .init = ofdpa_init,
2990        .fini = ofdpa_fini,
2991        .port_pre_init = ofdpa_port_pre_init,
2992        .port_init = ofdpa_port_init,
2993        .port_fini = ofdpa_port_fini,
2994        .port_open = ofdpa_port_open,
2995        .port_stop = ofdpa_port_stop,
2996        .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
2997        .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
2998        .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
2999        .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
3000        .port_obj_vlan_add = ofdpa_port_obj_vlan_add,
3001        .port_obj_vlan_del = ofdpa_port_obj_vlan_del,
3002        .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
3003        .port_obj_fdb_add = ofdpa_port_obj_fdb_add,
3004        .port_obj_fdb_del = ofdpa_port_obj_fdb_del,
3005        .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
3006        .port_master_linked = ofdpa_port_master_linked,
3007        .port_master_unlinked = ofdpa_port_master_unlinked,
3008        .port_neigh_update = ofdpa_port_neigh_update,
3009        .port_neigh_destroy = ofdpa_port_neigh_destroy,
3010        .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
3011        .fib4_add = ofdpa_fib4_add,
3012        .fib4_del = ofdpa_fib4_del,
3013        .fib4_abort = ofdpa_fib4_abort,
3014};
3015
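/* Illustrative only: a hedged sketch of how the rocker core might call
 * through this ops table.  It assumes rocker_port->rocker->wops points at
 * the registered world, as in rocker.h of this era; example_world_fdb_add()
 * is an illustrative wrapper, not the core's function.
 */
static int example_world_fdb_add(struct rocker_port *rocker_port,
                                 const struct switchdev_obj_port_fdb *fdb,
                                 struct switchdev_trans *trans)
{
        struct rocker_world_ops *wops = rocker_port->rocker->wops;

        if (!wops->port_obj_fdb_add)
                return -EOPNOTSUPP;
        return wops->port_obj_fdb_add(rocker_port, fdb, trans);
}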