linux/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in the message
 * @flags:              options part of the request
 * @tun_info.ipv4:              dest IPv4 address of active route
 * @tun_info.egress_port:       port the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info {
                __be32 ipv4;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:   destination IPv4 address
 * @src_ipv4:   source IPv4 address
 * @dst_addr:   destination MAC address
 * @src_addr:   source MAC address
 * @port_id:    NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
        __be32 dst_ipv4;
        __be32 src_ipv4;
        u8 dst_addr[ETH_ALEN];
        u8 src_addr[ETH_ALEN];
        __be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:       ingress port of packet that signalled request
 * @ipv4_addr:          destination IPv4 address for route
 * @reserved:           reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
        __be32 ingress_port;
        __be32 ipv4_addr;
        __be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:  destination of route
 * @list:       list pointer
 */
struct nfp_ipv4_route_entry {
        __be32 ipv4_addr;
        struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv4_addr:  array of up to NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
        __be32 count;
        __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:  IP address
 * @ref_count:  number of rules currently using this IP
 * @list:       list pointer
 */
struct nfp_ipv4_addr_entry {
        __be32 ipv4_addr;
        int ref_count;
        struct list_head list;
};

/**
 * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
 * @reserved:   reserved for future use
 * @count:      number of MAC addresses in the message
 * @addresses.index:    index of MAC address in the lookup table
 * @addresses.addr:     interface MAC address
 * @addresses:  series of MACs to offload
 */
struct nfp_tun_mac_addr {
        __be16 reserved;
        __be16 count;
        struct index_mac_addr {
                __be16 index;
                u8 addr[ETH_ALEN];
        } addresses[];
};

/**
 * struct nfp_tun_mac_offload_entry - list of MACs to offload
 * @index:      index of MAC address for offloading
 * @addr:       interface MAC address
 * @list:       list pointer
 */
struct nfp_tun_mac_offload_entry {
        __be16 index;
        u8 addr[ETH_ALEN];
        struct list_head list;
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_mac_non_nfp_idx - maps a non-NFP netdev ifindex to an 8-bit id
 * @ifindex:    netdev ifindex of the device
 * @index:      index of the netdev's MAC address on the NFP
 * @list:       list pointer
 */
struct nfp_tun_mac_non_nfp_idx {
        int ifindex;
        u8 index;
        struct list_head list;
};

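/* Process a periodic active tunnel message from the firmware. For each
 * reported destination IP, refresh the matching kernel neighbour entry so
 * that routes carrying offloaded tunnel traffic do not age out.
 */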
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_active_tuns *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        __be32 ipv4_addr;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_MAX_ROUTES) {
                nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != sizeof(struct nfp_tun_active_tuns) +
            sizeof(struct route_ip_info) * count) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        for (i = 0; i < count; i++) {
                ipv4_addr = payload->tun_info[i].ipv4;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_repr_get(app, port);
                if (!netdev)
                        continue;

                n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
}

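/* Allocate a control message of the given type and length, copy in the
 * payload and send it to the firmware over the control vNIC.
 */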
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
                         gfp_t flag)
{
        struct sk_buff *skb;
        unsigned char *msg;

        skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

        nfp_ctrl_tx(app->ctrl, skb);
        return 0;
}

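/* Return true if the destination IP is already in the offloaded route list. */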
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->nfp_neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        spin_unlock_bh(&priv->nfp_neigh_off_lock);
                        return true;
                }
        }
        spin_unlock_bh(&priv->nfp_neigh_off_lock);
        return false;
}

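/* Add a destination IP to the offloaded route list, unless it is already
 * present. Uses GFP_ATOMIC since it can be reached from the netevent
 * notifier path.
 */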
static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->nfp_neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        spin_unlock_bh(&priv->nfp_neigh_off_lock);
                        return;
                }
        }
        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_bh(&priv->nfp_neigh_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
                return;
        }

        entry->ipv4_addr = ipv4_addr;
        list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
        spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

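/* Remove a destination IP from the offloaded route list, if present. */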
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->nfp_neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        list_del(&entry->list);
                        kfree(entry);
                        break;
                }
        }
        spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

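/* Send a neighbour update to the firmware. A valid neighbour is written in
 * full and its route cached; an invalid one is written with only the
 * destination IP set, telling the firmware to remove the entry.
 */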
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
                    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
        struct nfp_tun_neigh payload;

        /* Only offload representor IPv4s for now. */
        if (!nfp_netdev_is_nfp_repr(netdev))
                return;

        memset(&payload, 0, sizeof(struct nfp_tun_neigh));
        payload.dst_ipv4 = flow->daddr;

        /* If entry has expired send dst IP with all other fields 0. */
        if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
                goto send_msg;
        }

        /* Have a valid neighbour so populate rest of entry. */
        payload.src_ipv4 = flow->saddr;
        ether_addr_copy(payload.src_addr, netdev->dev_addr);
        neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
        payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
        /* Add destination of new route to NFP cache. */
        nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
                                 sizeof(struct nfp_tun_neigh),
                                 (unsigned char *)&payload, flag);
}

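/* Netevent notifier callback. On neighbour updates or redirects that affect
 * a route already offloaded via a representor, push the new neighbour state
 * to the firmware.
 */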
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct nfp_flower_priv *app_priv;
        struct netevent_redirect *redir;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct nfp_app *app;
        struct rtable *rt;
        int err;

        switch (event) {
        case NETEVENT_REDIRECT:
                redir = (struct netevent_redirect *)ptr;
                n = redir->neigh;
                break;
        case NETEVENT_NEIGH_UPDATE:
                n = (struct neighbour *)ptr;
                break;
        default:
                return NOTIFY_DONE;
        }

        flow.daddr = *(__be32 *)n->primary_key;

        /* Only concerned with route changes for representors. */
        if (!nfp_netdev_is_nfp_repr(n->dev))
                return NOTIFY_DONE;

        app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
        app = app_priv->app;

        /* Only concerned with changes to routes already added to NFP. */
        if (!nfp_tun_has_route(app, flow.daddr))
                return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup to populate flow data. */
        rt = ip_route_output_key(dev_net(n->dev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                return NOTIFY_DONE;

        ip_rt_put(rt);
#else
        return NOTIFY_DONE;
#endif

        flow.flowi4_proto = IPPROTO_UDP;
        nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

        return NOTIFY_OK;
}

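/* Handle a route lookup request from the firmware: resolve the route and
 * neighbour for the requested destination in the namespace of the ingress
 * representor and reply with a neighbour update message.
 */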
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv4 *payload;
        struct net_device *netdev;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct rtable *rt;
        int err;

        payload = nfp_flower_cmsg_get_data(skb);

        netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
        if (!netdev)
                goto route_fail_warning;

        flow.daddr = payload->ipv4_addr;
        flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup on same namespace as ingress port. */
        rt = ip_route_output_key(dev_net(netdev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                goto route_fail_warning;
#else
        goto route_fail_warning;
#endif

        /* Get the neighbour entry for the lookup */
        n = dst_neigh_lookup(&rt->dst, &flow.daddr);
        ip_rt_put(rt);
        if (!n)
                goto route_fail_warning;
        nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
        neigh_release(n);
        return;

route_fail_warning:
        nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

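/* Send the current list of offloaded tunnel endpoint IPv4 addresses to the
 * firmware.
 */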
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct nfp_tun_ipv4_addr payload;
        struct list_head *ptr, *storage;
        int count;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
        mutex_lock(&priv->nfp_ipv4_off_lock);
        count = 0;
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                if (count >= NFP_FL_IPV4_ADDRS_MAX) {
                        mutex_unlock(&priv->nfp_ipv4_off_lock);
                        nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
                        return;
                }
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                payload.ipv4_addr[count++] = entry->ipv4_addr;
        }
        payload.count = cpu_to_be32(count);
        mutex_unlock(&priv->nfp_ipv4_off_lock);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                                 sizeof(struct nfp_tun_ipv4_addr),
                                 &payload, GFP_KERNEL);
}

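/* Take a reference on an offloaded tunnel endpoint IPv4 address, adding it
 * to the list and re-syncing the firmware if it is new.
 */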
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->nfp_ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count++;
                        mutex_unlock(&priv->nfp_ipv4_off_lock);
                        return;
                }
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->nfp_ipv4_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return;
        }
        entry->ipv4_addr = ipv4;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
        mutex_unlock(&priv->nfp_ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

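/* Drop a reference on an offloaded tunnel endpoint IPv4 address, removing
 * it from the list and re-syncing the firmware when the last user is gone.
 */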
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->nfp_ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count--;
                        if (!entry->ref_count) {
                                list_del(&entry->list);
                                kfree(entry);
                        }
                        break;
                }
        }
        mutex_unlock(&priv->nfp_ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

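/* Push the list of pending MAC address offloads to the firmware. On success
 * the list is flushed; on failure it is retained so the write can be
 * retried.
 */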
void nfp_tunnel_write_macs(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_offload_entry *entry;
        struct nfp_tun_mac_addr *payload;
        struct list_head *ptr, *storage;
        int mac_count, err, pay_size;

        mutex_lock(&priv->nfp_mac_off_lock);
        if (!priv->nfp_mac_off_count) {
                mutex_unlock(&priv->nfp_mac_off_lock);
                return;
        }

        pay_size = sizeof(struct nfp_tun_mac_addr) +
                   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;

        payload = kzalloc(pay_size, GFP_KERNEL);
        if (!payload) {
                mutex_unlock(&priv->nfp_mac_off_lock);
                return;
        }

        payload->count = cpu_to_be16(priv->nfp_mac_off_count);

        mac_count = 0;
        list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                   list);
                payload->addresses[mac_count].index = entry->index;
                ether_addr_copy(payload->addresses[mac_count].addr,
                                entry->addr);
                mac_count++;
        }

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
                                       pay_size, payload, GFP_KERNEL);

        kfree(payload);

        if (err) {
                mutex_unlock(&priv->nfp_mac_off_lock);
                /* Write failed so retain list for future retry. */
                return;
        }

        /* If list was successfully offloaded, flush it. */
        list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                   list);
                list_del(&entry->list);
                kfree(entry);
        }

        priv->nfp_mac_off_count = 0;
        mutex_unlock(&priv->nfp_mac_off_lock);
}

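/* Look up, or allocate via the IDA, the 8-bit MAC index used to identify a
 * non-NFP netdev's MAC address to the firmware.
 */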
static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_non_nfp_idx *entry;
        struct list_head *ptr, *storage;
        int idx;

        mutex_lock(&priv->nfp_mac_index_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
                if (entry->ifindex == ifindex) {
                        idx = entry->index;
                        mutex_unlock(&priv->nfp_mac_index_lock);
                        return idx;
                }
        }

        idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
                             NFP_MAX_MAC_INDEX, GFP_KERNEL);
        if (idx < 0) {
                mutex_unlock(&priv->nfp_mac_index_lock);
                return idx;
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                /* Release the freshly allocated index so it is not leaked. */
                ida_simple_remove(&priv->nfp_mac_off_ids, idx);
                mutex_unlock(&priv->nfp_mac_index_lock);
                return -ENOMEM;
        }
        entry->ifindex = ifindex;
        entry->index = idx;
        list_add_tail(&entry->list, &priv->nfp_mac_index_list);
        mutex_unlock(&priv->nfp_mac_index_lock);

        return idx;
}

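/* Release the MAC index allocated for a non-NFP netdev, if one exists. */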
static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_non_nfp_idx *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->nfp_mac_index_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
                if (entry->ifindex == ifindex) {
                        ida_simple_remove(&priv->nfp_mac_off_ids,
                                          entry->index);
                        list_del(&entry->list);
                        kfree(entry);
                        break;
                }
        }
        mutex_unlock(&priv->nfp_mac_index_lock);
}

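/* Queue a netdev's MAC address for offload. Representor MACs are indexed by
 * their port ID; other netdevs get a unique 8-bit index of their own.
 */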
static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
                                            struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_offload_entry *entry;
        u16 nfp_mac_idx;
        int port = 0;

        /* Check if MAC should be offloaded. */
        if (!is_valid_ether_addr(netdev->dev_addr))
                return;

        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_repr_get_port_id(netdev);
        else if (!nfp_fl_is_netdev_to_offload(netdev))
                return;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
                return;
        }

        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
            NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
                nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
        } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
                   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
                port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
                nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
        } else {
                /* Must assign our own unique 8-bit index. */
                int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);

                if (idx < 0) {
                        nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
                        kfree(entry);
                        return;
                }
                nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
        }

        entry->index = cpu_to_be16(nfp_mac_idx);
        ether_addr_copy(entry->addr, netdev->dev_addr);

        mutex_lock(&priv->nfp_mac_off_lock);
        priv->nfp_mac_off_count++;
        list_add_tail(&entry->list, &priv->nfp_mac_off_list);
        mutex_unlock(&priv->nfp_mac_off_lock);
}

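/* Handle netdev events, keeping the firmware's tunnel endpoint MAC table in
 * sync as interfaces register, come up, change address or go away.
 */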
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
                                 struct net_device *netdev,
                                 unsigned long event, void *ptr)
{
        if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
                /* If non-nfp netdev then free its offload index. */
                if (nfp_fl_is_netdev_to_offload(netdev))
                        nfp_tun_del_mac_idx(app, netdev->ifindex);
        } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
                   event == NETDEV_REGISTER) {
                nfp_tun_add_to_mac_offload_list(netdev, app);

                /* Force a list write to keep NFP up to date. */
                nfp_tunnel_write_macs(app);
        }
        return NOTIFY_OK;
}

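/* Initialise the tunnel offload locks, lists and IDA, and register the
 * netevent notifier. Called once when the flower app starts.
 */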
int nfp_tunnel_config_start(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;

        /* Initialise priv data for MAC offloading. */
        priv->nfp_mac_off_count = 0;
        mutex_init(&priv->nfp_mac_off_lock);
        INIT_LIST_HEAD(&priv->nfp_mac_off_list);
        mutex_init(&priv->nfp_mac_index_lock);
        INIT_LIST_HEAD(&priv->nfp_mac_index_list);
        ida_init(&priv->nfp_mac_off_ids);

        /* Initialise priv data for IPv4 offloading. */
        mutex_init(&priv->nfp_ipv4_off_lock);
        INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);

        /* Initialise priv data for neighbour offloading. */
        spin_lock_init(&priv->nfp_neigh_off_lock);
        INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
        priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

        return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
}

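/* Tear down tunnel offload state: unregister the notifier and free all
 * cached MAC, IPv4 and route list entries.
 */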
void nfp_tunnel_config_stop(struct nfp_app *app)
{
        struct nfp_tun_mac_offload_entry *mac_entry;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *route_entry;
        struct nfp_tun_mac_non_nfp_idx *mac_idx;
        struct nfp_ipv4_addr_entry *ip_entry;
        struct list_head *ptr, *storage;

        unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);

        /* Free any memory that may be occupied by MAC list. */
        list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
                mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                       list);
                list_del(&mac_entry->list);
                kfree(mac_entry);
        }

        /* Free any memory that may be occupied by MAC index list. */
        list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
                mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
                                     list);
                list_del(&mac_idx->list);
                kfree(mac_idx);
        }

        ida_destroy(&priv->nfp_mac_off_ids);

        /* Free any memory that may be occupied by ipv4 list. */
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                list_del(&ip_entry->list);
                kfree(ip_entry);
        }

        /* Free any memory that may be occupied by the route list. */
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
                                         list);
                list_del(&route_entry->list);
                kfree(route_entry);
        }
}