linux/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

#define NFP_TUN_PRE_TUN_RULE_LIMIT      32
#define NFP_TUN_PRE_TUN_RULE_DEL        0x1
#define NFP_TUN_PRE_TUN_IDX_BIT         0x8

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:              options for the rule offload
 * @port_idx:           index of destination MAC address for the rule
 * @vlan_tci:           VLAN info associated with MAC
 * @host_ctx_id:        stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
        __be32 flags;
        __be16 port_idx;
        __be16 vlan_tci;
        __be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in message
 * @flags:              options part of the request
 * @tun_info.ipv4:              dest IPv4 address of active route
 * @tun_info.egress_port:       port the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info {
                __be32 ipv4;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:   destination IPv4 address
 * @src_ipv4:   source IPv4 address
 * @dst_addr:   destination MAC address
 * @src_addr:   source MAC address
 * @port_id:    NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
        __be32 dst_ipv4;
        __be32 src_ipv4;
        u8 dst_addr[ETH_ALEN];
        u8 src_addr[ETH_ALEN];
        __be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:       ingress port of packet that signalled request
 * @ipv4_addr:          destination ipv4 address for route
 * @reserved:           reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
        __be32 ingress_port;
        __be32 ipv4_addr;
        __be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:  destination of route
 * @list:       list pointer
 */
struct nfp_ipv4_route_entry {
        __be32 ipv4_addr;
        struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv4_addr:  array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
        __be32 count;
        __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:  IP address
 * @ref_count:  number of rules currently using this IP
 * @list:       list pointer
 */
struct nfp_ipv4_addr_entry {
        __be32 ipv4_addr;
        int ref_count;
        struct list_head list;
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG    0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:      MAC address offload options
 * @count:      number of MAC addresses in the message (should be 1)
 * @index:      index of MAC address in the lookup table
 * @addr:       interface MAC address
 */
struct nfp_tun_mac_addr_offload {
        __be16 flags;
        __be16 count;
        __be16 index;
        u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
        NFP_TUNNEL_MAC_OFFLOAD_ADD =            0,
        NFP_TUNNEL_MAC_OFFLOAD_DEL =            1,
        NFP_TUNNEL_MAC_OFFLOAD_MOD =            2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:            Hashtable entry
 * @addr:               Offloaded MAC address
 * @index:              Offloaded index for given MAC address
 * @ref_count:          Number of devs using this MAC address
 * @repr_list:          List of reprs sharing this MAC address
 * @bridge_count:       Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
        struct rhash_head ht_node;
        u8 addr[ETH_ALEN];
        u16 index;
        int ref_count;
        struct list_head repr_list;
        int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
        .key_offset     = offsetof(struct nfp_tun_offloaded_mac, addr),
        .head_offset    = offsetof(struct nfp_tun_offloaded_mac, ht_node),
        .key_len        = ETH_ALEN,
        .automatic_shrinking    = true,
};

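/* Process a periodic active-tunnels cmsg from the firmware: refresh the
 * kernel neighbour entry for each reported destination IP so offloaded
 * tunnel routes are not aged out while still passing traffic.
 */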
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_active_tuns *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        __be32 ipv4_addr;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_MAX_ROUTES) {
                nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != struct_size(payload, tun_info, count)) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        rcu_read_lock();
        for (i = 0; i < count; i++) {
                ipv4_addr = payload->tun_info[i].ipv4;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_dev_get(app, port, NULL);
                if (!netdev)
                        continue;

                n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
        rcu_read_unlock();
}

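/* Allocate a cmsg of the given type, copy the payload into it and send it
 * to the firmware over the control channel.
 */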
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
                         gfp_t flag)
{
        struct sk_buff *skb;
        unsigned char *msg;

        skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

        nfp_ctrl_tx(app->ctrl, skb);
        return 0;
}

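/* Check whether a destination IP is already in the offloaded route cache. */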
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->tun.neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        spin_unlock_bh(&priv->tun.neigh_off_lock);
                        return true;
                }
        }
        spin_unlock_bh(&priv->tun.neigh_off_lock);
        return false;
}

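/* Add a destination IP to the offloaded route cache; duplicates are ignored. */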
static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->tun.neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        spin_unlock_bh(&priv->tun.neigh_off_lock);
                        return;
                }
        }
        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_bh(&priv->tun.neigh_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
                return;
        }

        entry->ipv4_addr = ipv4_addr;
        list_add_tail(&entry->list, &priv->tun.neigh_off_list);
        spin_unlock_bh(&priv->tun.neigh_off_lock);
}

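/* Remove a destination IP from the offloaded route cache, if present. */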
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->tun.neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        list_del(&entry->list);
                        kfree(entry);
                        break;
                }
        }
        spin_unlock_bh(&priv->tun.neigh_off_lock);
}

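/* Send a neighbour entry for the given flow to the firmware. If the
 * neighbour is invalid or dead, only the destination IP is sent so the
 * firmware can drop its entry, and an ARP probe is triggered to re-verify
 * the neighbour state.
 */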
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
                    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
        struct nfp_tun_neigh payload;
        u32 port_id;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
        if (!port_id)
                return;

        memset(&payload, 0, sizeof(struct nfp_tun_neigh));
        payload.dst_ipv4 = flow->daddr;

        /* If entry has expired send dst IP with all other fields 0. */
        if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
                goto send_msg;
        }

        /* Have a valid neighbour so populate rest of entry. */
        payload.src_ipv4 = flow->saddr;
        ether_addr_copy(payload.src_addr, netdev->dev_addr);
        neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
        payload.port_id = cpu_to_be32(port_id);
        /* Add destination of new route to NFP cache. */
        nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
                                 sizeof(struct nfp_tun_neigh),
                                 (unsigned char *)&payload, flag);
}

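/* Netevent notifier: on neighbour updates or redirects for destinations
 * already offloaded, redo the route lookup and push the refreshed
 * neighbour entry to the firmware.
 */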
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct nfp_flower_priv *app_priv;
        struct netevent_redirect *redir;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct nfp_app *app;
        struct rtable *rt;
        int err;

        switch (event) {
        case NETEVENT_REDIRECT:
                redir = (struct netevent_redirect *)ptr;
                n = redir->neigh;
                break;
        case NETEVENT_NEIGH_UPDATE:
                n = (struct neighbour *)ptr;
                break;
        default:
                return NOTIFY_DONE;
        }

        flow.daddr = *(__be32 *)n->primary_key;

        app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
        app = app_priv->app;

        if (!nfp_netdev_is_nfp_repr(n->dev) &&
            !nfp_flower_internal_port_can_offload(app, n->dev))
                return NOTIFY_DONE;

        /* Only concerned with changes to routes already added to NFP. */
        if (!nfp_tun_has_route(app, flow.daddr))
                return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup to populate flow data. */
        rt = ip_route_output_key(dev_net(n->dev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                return NOTIFY_DONE;

        ip_rt_put(rt);
#else
        return NOTIFY_DONE;
#endif

        flow.flowi4_proto = IPPROTO_UDP;
        nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

        return NOTIFY_OK;
}

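/* Handle a route lookup request cmsg from the firmware: resolve the route
 * and neighbour in the same namespace as the ingress port and reply with a
 * neighbour cmsg.
 */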
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv4 *payload;
        struct net_device *netdev;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct rtable *rt;
        int err;

        payload = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;

        flow.daddr = payload->ipv4_addr;
        flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup on same namespace as ingress port. */
        rt = ip_route_output_key(dev_net(netdev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                goto fail_rcu_unlock;
#else
        goto fail_rcu_unlock;
#endif

        /* Get the neighbour entry for the lookup */
        n = dst_neigh_lookup(&rt->dst, &flow.daddr);
        ip_rt_put(rt);
        if (!n)
                goto fail_rcu_unlock;
        nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
        neigh_release(n);
        rcu_read_unlock();
        return;

fail_rcu_unlock:
        rcu_read_unlock();
        nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

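/* Send the current list of offloaded tunnel endpoint IPv4 addresses to the
 * firmware.
 */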
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct nfp_tun_ipv4_addr payload;
        struct list_head *ptr, *storage;
        int count;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
        mutex_lock(&priv->tun.ipv4_off_lock);
        count = 0;
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                if (count >= NFP_FL_IPV4_ADDRS_MAX) {
                        mutex_unlock(&priv->tun.ipv4_off_lock);
                        nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
                        return;
                }
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                payload.ipv4_addr[count++] = entry->ipv4_addr;
        }
        payload.count = cpu_to_be32(count);
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                                 sizeof(struct nfp_tun_ipv4_addr),
                                 &payload, GFP_KERNEL);
}

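/* Take a reference on an offloaded IPv4 endpoint address, adding it to the
 * cache and updating the firmware list if it is new.
 */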
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->tun.ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count++;
                        mutex_unlock(&priv->tun.ipv4_off_lock);
                        return;
                }
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->tun.ipv4_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return;
        }
        entry->ipv4_addr = ipv4;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

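/* Drop a reference on an offloaded IPv4 endpoint address, removing it from
 * the cache when unused, and resend the address list to the firmware.
 */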
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->tun.ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count--;
                        if (!entry->ref_count) {
                                list_del(&entry->list);
                                kfree(entry);
                        }
                        break;
                }
        }
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

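/* Send a single MAC address add/delete cmsg to the firmware for the given
 * lookup table index.
 */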
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
        struct nfp_tun_mac_addr_offload payload;

        memset(&payload, 0, sizeof(payload));

        if (del)
                payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

        /* FW supports multiple MACs per cmsg but restrict to single. */
        payload.count = cpu_to_be16(1);
        payload.index = cpu_to_be16(idx);
        ether_addr_copy(payload.addr, mac);

        return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
                                        sizeof(struct nfp_tun_mac_addr_offload),
                                        &payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
            NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
                return true;

        return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
        return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
        return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
        return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
        return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{
        struct nfp_flower_priv *priv = app->priv;

        return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
                                      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
                                           struct net_device *netdev, bool mod)
{
        if (nfp_netdev_is_nfp_repr(netdev)) {
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                repr = netdev_priv(netdev);
                repr_priv = repr->app_priv;

                /* If modifying MAC, remove repr from old list first. */
                if (mod)
                        list_del(&repr_priv->mac_list);

                list_add_tail(&repr_priv->mac_list, &entry->repr_list);
        } else if (nfp_flower_is_supported_bridge(netdev)) {
                entry->bridge_count++;
        }

        entry->ref_count++;
}

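/* Offload the MAC address of a netdev, reusing an existing entry when the
 * address is already offloaded. Physical port reprs use an index derived
 * from the port ID; shared and non-repr MACs get a global index from the
 * IDA. Bridge devices additionally set the pre-tunnel index bit.
 */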
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          int port, bool mod)
{
        struct nfp_flower_priv *priv = app->priv;
        int ida_idx = NFP_MAX_MAC_INDEX, err;
        struct nfp_tun_offloaded_mac *entry;
        u16 nfp_mac_idx = 0;

        entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
        if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
                if (entry->bridge_count ||
                    !nfp_flower_is_supported_bridge(netdev)) {
                        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
                                                                   netdev, mod);
                        return 0;
                }

                /* MAC is global but matches need to go to pre_tun table. */
                nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
        }

        if (!nfp_mac_idx) {
                /* Assign a global index if non-repr or MAC is now shared. */
                if (entry || !port) {
                        ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
                                                 NFP_MAX_MAC_INDEX, GFP_KERNEL);
                        if (ida_idx < 0)
                                return ida_idx;

                        nfp_mac_idx =
                                nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

                        if (nfp_flower_is_supported_bridge(netdev))
                                nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

                } else {
                        nfp_mac_idx =
                                nfp_tunnel_get_mac_idx_from_phy_port_id(port);
                }
        }

        if (!entry) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        err = -ENOMEM;
                        goto err_free_ida;
                }

                ether_addr_copy(entry->addr, netdev->dev_addr);
                INIT_LIST_HEAD(&entry->repr_list);

                if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
                                           &entry->ht_node,
                                           offloaded_macs_params)) {
                        err = -ENOMEM;
                        goto err_free_entry;
                }
        }

        err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
                                       nfp_mac_idx, false);
        if (err) {
                /* If not shared then free. */
                if (!entry->ref_count)
                        goto err_remove_hash;
                goto err_free_ida;
        }

        entry->index = nfp_mac_idx;
        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

        return 0;

err_remove_hash:
        rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
                               offloaded_macs_params);
err_free_entry:
        kfree(entry);
err_free_ida:
        if (ida_idx != NFP_MAX_MAC_INDEX)
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);

        return err;
}

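/* Release a reference on an offloaded MAC address. If only a single phys
 * repr still uses the MAC, revert its index to a port based one; once the
 * last user is gone, free the entry and delete the MAC from the firmware.
 */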
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          u8 *mac, bool mod)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_tun_offloaded_mac *entry;
        struct nfp_repr *repr;
        int ida_idx;

        entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
        if (!entry)
                return 0;

        entry->ref_count--;
        /* If del is part of a mod then mac_list is still in use elsewhere. */
        if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
                repr = netdev_priv(netdev);
                repr_priv = repr->app_priv;
                list_del(&repr_priv->mac_list);
        }

        if (nfp_flower_is_supported_bridge(netdev)) {
                entry->bridge_count--;

                if (!entry->bridge_count && entry->ref_count) {
                        u16 nfp_mac_idx;

                        nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
                        if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
                                                     false)) {
                                nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                                                     netdev_name(netdev));
                                return 0;
                        }

                        entry->index = nfp_mac_idx;
                        return 0;
                }
        }

        /* If MAC is now used by 1 repr, set the offloaded MAC index to port. */
        if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
                u16 nfp_mac_idx;
                int port, err;

                repr_priv = list_first_entry(&entry->repr_list,
                                             struct nfp_flower_repr_priv,
                                             mac_list);
                repr = repr_priv->nfp_repr;
                port = nfp_repr_get_port_id(repr->netdev);
                nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
                err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
                if (err) {
                        nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                                             netdev_name(netdev));
                        return 0;
                }

                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
                entry->index = nfp_mac_idx;
                return 0;
        }

        if (entry->ref_count)
                return 0;

        WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
                                            &entry->ht_node,
                                            offloaded_macs_params));
        /* If MAC has global ID then extract and free the ida entry. */
        if (nfp_tunnel_is_mac_idx_global(entry->index)) {
                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
        }

        kfree(entry);

        return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

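/* Apply an add/delete/modify MAC offload command to a netdev, tracking
 * offload state in the repr or non-repr private data.
 */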
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
                       enum nfp_flower_mac_offload_cmd cmd)
{
        struct nfp_flower_non_repr_priv *nr_priv = NULL;
        bool non_repr = false, *mac_offloaded;
        u8 *off_mac = NULL;
        int err, port = 0;

        if (nfp_netdev_is_nfp_repr(netdev)) {
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                repr = netdev_priv(netdev);
                if (repr->app != app)
                        return 0;

                repr_priv = repr->app_priv;
                if (repr_priv->on_bridge)
                        return 0;

                mac_offloaded = &repr_priv->mac_offloaded;
                off_mac = &repr_priv->offloaded_mac_addr[0];
                port = nfp_repr_get_port_id(netdev);
                if (!nfp_tunnel_port_is_phy_repr(port))
                        return 0;
        } else if (nfp_fl_is_netdev_to_offload(netdev)) {
                nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
                if (!nr_priv)
                        return -ENOMEM;

                mac_offloaded = &nr_priv->mac_offloaded;
                off_mac = &nr_priv->offloaded_mac_addr[0];
                non_repr = true;
        } else {
                return 0;
        }

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                err = -EINVAL;
                goto err_put_non_repr_priv;
        }

        if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
                cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

        switch (cmd) {
        case NFP_TUNNEL_MAC_OFFLOAD_ADD:
                err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
                if (err)
                        goto err_put_non_repr_priv;

                if (non_repr)
                        __nfp_flower_non_repr_priv_get(nr_priv);

                *mac_offloaded = true;
                ether_addr_copy(off_mac, netdev->dev_addr);
                break;
        case NFP_TUNNEL_MAC_OFFLOAD_DEL:
                /* Only attempt delete if add was successful. */
                if (!*mac_offloaded)
                        break;

                if (non_repr)
                        __nfp_flower_non_repr_priv_put(nr_priv);

                *mac_offloaded = false;

                err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
                                                false);
                if (err)
                        goto err_put_non_repr_priv;

                break;
        case NFP_TUNNEL_MAC_OFFLOAD_MOD:
                /* Ignore if changing to the same address. */
                if (ether_addr_equal(netdev->dev_addr, off_mac))
                        break;

                err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
                if (err)
                        goto err_put_non_repr_priv;

                /* Delete the previous MAC address. */
                err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
                                             netdev_name(netdev));

                ether_addr_copy(off_mac, netdev->dev_addr);
                break;
        default:
                err = -EINVAL;
                goto err_put_non_repr_priv;
        }

        if (non_repr)
                __nfp_flower_non_repr_priv_put(nr_priv);

        return 0;

err_put_non_repr_priv:
        if (non_repr)
                __nfp_flower_non_repr_priv_put(nr_priv);

        return err;
}

int nfp_tunnel_mac_event_handler(struct nfp_app *app,
                                 struct net_device *netdev,
                                 unsigned long event, void *ptr)
{
        int err;

        if (event == NETDEV_DOWN) {
                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_DEL);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_UP) {
                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_ADD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_CHANGEADDR) {
                /* Only offload addr change if netdev is already up. */
                if (!(netdev->flags & IFF_UP))
                        return NOTIFY_OK;

                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_MOD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_CHANGEUPPER) {
                /* If a repr is attached to a bridge then tunnel packets
                 * entering the physical port are directed through the bridge
                 * datapath and cannot be directly detunneled. Therefore,
                 * associated offloaded MACs and indexes should not be used
                 * by fw for detunneling.
                 */
                struct netdev_notifier_changeupper_info *info = ptr;
                struct net_device *upper = info->upper_dev;
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                if (!nfp_netdev_is_nfp_repr(netdev) ||
                    !nfp_flower_is_supported_bridge(upper))
                        return NOTIFY_OK;

                repr = netdev_priv(netdev);
                if (repr->app != app)
                        return NOTIFY_OK;

                repr_priv = repr->app_priv;

                if (info->linking) {
                        if (nfp_tunnel_offload_mac(app, netdev,
                                                   NFP_TUNNEL_MAC_OFFLOAD_DEL))
                                nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
                                                     netdev_name(netdev));
                        repr_priv->on_bridge = true;
                } else {
                        repr_priv->on_bridge = false;

                        if (!(netdev->flags & IFF_UP))
                                return NOTIFY_OK;

                        if (nfp_tunnel_offload_mac(app, netdev,
                                                   NFP_TUNNEL_MAC_OFFLOAD_ADD))
                                nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                                                     netdev_name(netdev));
                }
        }
        return NOTIFY_OK;
}

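/* Offload a pre-tunnel rule for a flow whose egress device is an internal
 * port. The rule is keyed on the MAC index of that device and is counted
 * against the pre-tunnel rule limit.
 */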
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
                                 struct nfp_fl_payload *flow)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_offloaded_mac *mac_entry;
        struct nfp_tun_pre_tun_rule payload;
        struct net_device *internal_dev;
        int err;

        if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
                return -ENOSPC;

        memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

        internal_dev = flow->pre_tun_rule.dev;
        payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
        payload.host_ctx_id = flow->meta.host_ctx_id;

        /* Lookup MAC index for the pre-tunnel rule egress device.
         * Note that because the device is always an internal port, it will
         * have a constant global index so does not need to be tracked.
         */
        mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
                                                     internal_dev->dev_addr);
        if (!mac_entry)
                return -ENOENT;

        payload.port_idx = cpu_to_be16(mac_entry->index);

        /* Copy mac id and vlan to flow - dev may not exist at delete time. */
        flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
        flow->pre_tun_rule.port_idx = payload.port_idx;

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                                       sizeof(struct nfp_tun_pre_tun_rule),
                                       (unsigned char *)&payload, GFP_KERNEL);
        if (err)
                return err;

        app_priv->pre_tun_rule_cnt++;

        return 0;
}

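/* Remove a previously offloaded pre-tunnel rule from the firmware. */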
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
                                     struct nfp_fl_payload *flow)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_pre_tun_rule payload;
        u32 tmp_flags = 0;
        int err;

        memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

        tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
        payload.flags = cpu_to_be32(tmp_flags);
        payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
        payload.port_idx = flow->pre_tun_rule.port_idx;

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                                       sizeof(struct nfp_tun_pre_tun_rule),
                                       (unsigned char *)&payload, GFP_KERNEL);
        if (err)
                return err;

        app_priv->pre_tun_rule_cnt--;

        return 0;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        int err;

        /* Initialise rhash for MAC offload tracking. */
        err = rhashtable_init(&priv->tun.offloaded_macs,
                              &offloaded_macs_params);
        if (err)
                return err;

        ida_init(&priv->tun.mac_off_ids);

        /* Initialise priv data for IPv4 offloading. */
        mutex_init(&priv->tun.ipv4_off_lock);
        INIT_LIST_HEAD(&priv->tun.ipv4_off_list);

        /* Initialise priv data for neighbour offloading. */
        spin_lock_init(&priv->tun.neigh_off_lock);
        INIT_LIST_HEAD(&priv->tun.neigh_off_list);
        priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

        err = register_netevent_notifier(&priv->tun.neigh_nb);
        if (err) {
                rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                                            nfp_check_rhashtable_empty, NULL);
                return err;
        }

        return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *route_entry;
        struct nfp_ipv4_addr_entry *ip_entry;
        struct list_head *ptr, *storage;

        unregister_netevent_notifier(&priv->tun.neigh_nb);

        ida_destroy(&priv->tun.mac_off_ids);

        /* Free any memory that may be occupied by ipv4 list. */
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                list_del(&ip_entry->list);
                kfree(ip_entry);
        }

        /* Free any memory that may be occupied by the route list. */
        list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
                route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
                                         list);
                list_del(&route_entry->list);
                kfree(route_entry);
        }

        /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
        rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                                    nfp_check_rhashtable_empty, NULL);
}