/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
        const struct mlxsw_sp_mr_ops *mr_ops;
        void *catchall_route_priv;
        struct delayed_work stats_update_dw;
        struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
        unsigned long priv[0];
        /* priv must always be the last member */
};

struct mlxsw_sp_mr_vif {
        struct net_device *dev;
        const struct mlxsw_sp_rif *rif;
        unsigned long vif_flags;

        /* A list of route_vif_entry structs that point to routes in which
         * this VIF instance is used as one of the egress VIFs
         */
        struct list_head route_evif_list;

        /* A list of route_vif_entry structs that point to routes in which
         * this VIF instance is used as the ingress VIF
         */
        struct list_head route_ivif_list;
};

struct mlxsw_sp_mr_route_vif_entry {
        struct list_head vif_node;
        struct list_head route_node;
        struct mlxsw_sp_mr_vif *mr_vif;
        struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table {
        struct list_head node;
        enum mlxsw_sp_l3proto proto;
        struct mlxsw_sp *mlxsw_sp;
        u32 vr_id;
        struct mlxsw_sp_mr_vif vifs[MAXVIFS];
        struct list_head route_list;
        struct rhashtable route_ht;
        char catchall_route_priv[0];
        /* catchall_route_priv must always be the last member */
};

struct mlxsw_sp_mr_route {
        struct list_head node;
        struct rhash_head ht_node;
        struct mlxsw_sp_mr_route_key key;
        enum mlxsw_sp_mr_route_action route_action;
        u16 min_mtu;
        struct mfc_cache *mfc4;
        void *route_priv;
        const struct mlxsw_sp_mr_table *mr_table;
        /* A list of route_vif_entry structs that point to the egress VIFs */
        struct list_head evif_list;
        /* A route_vif_entry struct that points to the ingress VIF */
        struct mlxsw_sp_mr_route_vif_entry ivif;
};

static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
        .key_len = sizeof(struct mlxsw_sp_mr_route_key),
        .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
        .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
        .automatic_shrinking = true,
};
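
/* Routes are tracked both on the per-table route_list (for iteration and
 * flushing) and in the hash table above, which allows exact-match lookups
 * on the full {VR ID, protocol, group, source} key, e.g.:
 *
 *        mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
 *                                          mlxsw_sp_mr_route_ht_params);
 */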

static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
{
        return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
        return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
{
        return vif->dev;
}

static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
        vifi_t ivif;

        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                ivif = mr_route->mfc4->mfc_parent;
                return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
        return false;
}

static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;
        int valid_evifs;

        valid_evifs = 0;
        list_for_each_entry(rve, &mr_route->evif_list, route_node)
                if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
                        valid_evifs++;
        return valid_evifs;
}

static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
{
        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
        return false;
}

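/* Compute the hardware action for a route: trap when the route cannot be
 * fully offloaded (invalid iVIF, a (*,G) route the kernel would not match,
 * or no valid eVIFs), trap-and-forward when an eVIF still requires software
 * routing, and forward otherwise.
 */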
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;

        /* If the ingress VIF is not regular and resolved, trap the route */
        if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
                return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

        /* The kernel does not match a (*,G) route in which the ingress
         * interface is not one of the egress interfaces, so trap this kind
         * of route.
         */
        if (mlxsw_sp_mr_route_starg(mr_route) &&
            !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
                return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

        /* If the route has no valid eVIFs, trap it. */
        if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
                return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

        /* If one of the eVIFs has no RIF, trap-and-forward the route, as
         * there is some more routing to do in software too.
         */
        list_for_each_entry(rve, &mr_route->evif_list, route_node)
                if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
                        return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

        return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
        return mlxsw_sp_mr_route_starg(mr_route) ?
                MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

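/* Build the IPv4 route key. A (*,G) route is expressed by an INADDR_ANY
 * source with an all-zero source mask, while an (S,G) route uses a full
 * /32 source mask.
 */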
static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
                                   struct mlxsw_sp_mr_route_key *key,
                                   const struct mfc_cache *mfc)
{
        bool starg = (mfc->mfc_origin == htonl(INADDR_ANY));

        memset(key, 0, sizeof(*key));
        key->vrid = mr_table->vr_id;
        key->proto = mr_table->proto;
        key->group.addr4 = mfc->mfc_mcastgrp;
        key->group_mask.addr4 = htonl(0xffffffff);
        key->source.addr4 = mfc->mfc_origin;
        key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
                                       struct mlxsw_sp_mr_vif *mr_vif)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;

        rve = kzalloc(sizeof(*rve), GFP_KERNEL);
        if (!rve)
                return -ENOMEM;
        rve->mr_route = mr_route;
        rve->mr_vif = mr_vif;
        list_add_tail(&rve->route_node, &mr_route->evif_list);
        list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
        return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
        list_del(&rve->route_node);
        list_del(&rve->vif_node);
        kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
                                        struct mlxsw_sp_mr_vif *mr_vif)
{
        mr_route->ivif.mr_route = mr_route;
        mr_route->ivif.mr_vif = mr_vif;
        list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
        list_del(&mr_route->ivif.vif_node);
}

static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
                              struct mlxsw_sp_mr_route *mr_route,
                              struct mlxsw_sp_mr_route_info *route_info)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;
        u16 *erif_indices;
        u16 irif_index;
        u16 erif = 0;

        erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
                                     GFP_KERNEL);
        if (!erif_indices)
                return -ENOMEM;

        list_for_each_entry(rve, &mr_route->evif_list, route_node) {
                if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
                        u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

                        erif_indices[erif++] = rifi;
                }
        }

        if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
                irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
        else
                irif_index = 0;

        route_info->irif_index = irif_index;
        route_info->erif_indices = erif_indices;
        route_info->min_mtu = mr_route->min_mtu;
        route_info->route_action = mr_route->route_action;
        route_info->erif_num = erif;
        return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
        kfree(route_info->erif_indices);
}

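/* Commit a route to the hardware. On a first write, a new route_priv is
 * allocated and mr_ops->route_create() is called; on a replace, the
 * existing route_priv is reused and only the route info is updated.
 */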
static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
                                   struct mlxsw_sp_mr_route *mr_route,
                                   bool replace)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr_route_info route_info;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        int err;

        err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
        if (err)
                return err;

        if (!replace) {
                struct mlxsw_sp_mr_route_params route_params;

                mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
                                               GFP_KERNEL);
                if (!mr_route->route_priv) {
                        err = -ENOMEM;
                        goto out;
                }

                route_params.key = mr_route->key;
                route_params.value = route_info;
                route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
                err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
                                               mr_route->route_priv,
                                               &route_params);
                if (err)
                        kfree(mr_route->route_priv);
        } else {
                err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
                                               &route_info);
        }
out:
        mlxsw_sp_mr_route_info_destroy(&route_info);
        return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
                                    struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
        kfree(mr_route->route_priv);
}

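/* Build a driver route for an IPv4 MFC entry: hold a reference on the MFC
 * cache entry, link the route to its ingress and egress VIFs and track the
 * minimum MTU over all egress devices so it can be handed to the hardware.
 */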
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
                          struct mfc_cache *mfc)
{
        struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
        struct mlxsw_sp_mr_route *mr_route;
        int err = 0;
        int i;

        /* Allocate and init a new route and fill it with parameters */
        mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
        if (!mr_route)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&mr_route->evif_list);
        mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);

        /* Find min_mtu and link iVIF and eVIFs */
        mr_route->min_mtu = ETH_MAX_MTU;
        ipmr_cache_hold(mfc);
        mr_route->mfc4 = mfc;
        mr_route->mr_table = mr_table;
        for (i = 0; i < MAXVIFS; i++) {
                if (mfc->mfc_un.res.ttls[i] != 255) {
                        err = mlxsw_sp_mr_route_evif_link(mr_route,
                                                          &mr_table->vifs[i]);
                        if (err)
                                goto err;
                        if (mr_table->vifs[i].dev &&
                            mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
                                mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
                }
        }
        mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);

        mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
        return mr_route;
err:
        ipmr_cache_put(mfc);
        list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
                mlxsw_sp_mr_route_evif_unlink(rve);
        kfree(mr_route);
        return ERR_PTR(err);
}

static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
                                       struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

        mlxsw_sp_mr_route_ivif_unlink(mr_route);
        ipmr_cache_put(mr_route->mfc4);
        list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
                mlxsw_sp_mr_route_evif_unlink(rve);
        kfree(mr_route);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
                                      struct mlxsw_sp_mr_route *mr_route)
{
        switch (mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
}

static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
                                        bool offload)
{
        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                if (offload)
                        mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
                else
                        mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
        bool offload;

        offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
        mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

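/* Remove a route from the hardware and from all table data structures.
 * The offload flag is cleared first so the kernel stops reporting the
 * route as offloaded.
 */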
static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
                                    struct mlxsw_sp_mr_route *mr_route)
{
        mlxsw_sp_mr_mfc_offload_set(mr_route, false);
        mlxsw_sp_mr_route_erase(mr_table, mr_route);
        rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
                               mlxsw_sp_mr_route_ht_params);
        list_del(&mr_route->node);
        mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}

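/* Add (or replace) the offload for an IPv4 MFC entry. (*,*) proxy routes
 * and duplicate keys are rejected; on replace, the new route inherits the
 * hardware route_priv of the route it replaces before the original route
 * is destroyed.
 */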
int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
                           struct mfc_cache *mfc, bool replace)
{
        struct mlxsw_sp_mr_route *mr_orig_route = NULL;
        struct mlxsw_sp_mr_route *mr_route;
        int err;

        /* If the route is a (*,*) route, abort, as this kind of route is
         * used for proxy routes.
         */
        if (mfc->mfc_origin == htonl(INADDR_ANY) &&
            mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
                dev_warn(mr_table->mlxsw_sp->bus_info->dev,
                         "Offloading proxy routes is not supported.\n");
                return -EINVAL;
        }

        /* Create a new route */
        mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
        if (IS_ERR(mr_route))
                return PTR_ERR(mr_route);

        /* Find any route with a matching key */
        mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
                                               &mr_route->key,
                                               mlxsw_sp_mr_route_ht_params);
        if (replace) {
                /* In the replace case, make the new route point to the
                 * existing route_priv.
                 */
                if (WARN_ON(!mr_orig_route)) {
                        err = -ENOENT;
                        goto err_no_orig_route;
                }
                mr_route->route_priv = mr_orig_route->route_priv;
        } else if (mr_orig_route) {
                /* In the non-replace case, if another route with the same
                 * key was found, abort, as duplicate routes are only used
                 * for proxy routes.
                 */
                dev_warn(mr_table->mlxsw_sp->bus_info->dev,
                         "Offloading proxy routes is not supported.\n");
                err = -EINVAL;
                goto err_duplicate_route;
        }

        /* Put it in the table data structures */
        list_add_tail(&mr_route->node, &mr_table->route_list);
        err = rhashtable_insert_fast(&mr_table->route_ht,
                                     &mr_route->ht_node,
                                     mlxsw_sp_mr_route_ht_params);
        if (err)
                goto err_rhashtable_insert;

        /* Write the route to the hardware */
        err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
        if (err)
                goto err_mr_route_write;

        /* Destroy the original route */
        if (replace) {
                rhashtable_remove_fast(&mr_table->route_ht,
                                       &mr_orig_route->ht_node,
                                       mlxsw_sp_mr_route_ht_params);
                list_del(&mr_orig_route->node);
                mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
        }

        mlxsw_sp_mr_mfc_offload_update(mr_route);
        return 0;

err_mr_route_write:
        rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
                               mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
        list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
        mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
        return err;
}

void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
                            struct mfc_cache *mfc)
{
        struct mlxsw_sp_mr_route *mr_route;
        struct mlxsw_sp_mr_route_key key;

        mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
        mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
                                          mlxsw_sp_mr_route_ht_params);
        if (mr_route)
                __mlxsw_sp_mr_route_del(mr_table, mr_route);
}

/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
                               struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        enum mlxsw_sp_mr_route_action route_action;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u16 irif_index;
        int err;

        route_action = mlxsw_sp_mr_route_action(rve->mr_route);
        if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
                return 0;

        /* rve->mr_vif->rif is guaranteed to be valid at this stage */
        irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
        err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
                                            irif_index);
        if (err)
                return err;

        err = mr->mr_ops->route_action_update(mlxsw_sp,
                                              rve->mr_route->route_priv,
                                              route_action);
        if (err)
                /* No need to rollback here because the iRIF change only takes
                 * effect after the action has been updated.
                 */
                return err;

        rve->mr_route->route_action = route_action;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
        return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
                                 struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
                                        MLXSW_SP_MR_ROUTE_ACTION_TRAP);
        rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
                               struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        enum mlxsw_sp_mr_route_action route_action;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u16 erif_index = 0;
        int err;

        /* Update the route action, as the new eVIF can be a tunnel or a
         * pimreg device, which will require updating the action.
         */
        route_action = mlxsw_sp_mr_route_action(rve->mr_route);
        if (route_action != rve->mr_route->route_action) {
                err = mr->mr_ops->route_action_update(mlxsw_sp,
                                                      rve->mr_route->route_priv,
                                                      route_action);
                if (err)
                        return err;
        }

        /* Add the eRIF */
        if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
                erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
                err = mr->mr_ops->route_erif_add(mlxsw_sp,
                                                 rve->mr_route->route_priv,
                                                 erif_index);
                if (err)
                        goto err_route_erif_add;
        }

        /* Update the minimum MTU */
        if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
                rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
                err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
                                                       rve->mr_route->route_priv,
                                                       rve->mr_route->min_mtu);
                if (err)
                        goto err_route_min_mtu_update;
        }

        rve->mr_route->route_action = route_action;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
        return 0;

err_route_min_mtu_update:
        if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
                mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
                                           erif_index);
err_route_erif_add:
        if (route_action != rve->mr_route->route_action)
                mr->mr_ops->route_action_update(mlxsw_sp,
                                                rve->mr_route->route_priv,
                                                rve->mr_route->route_action);
        return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
                                 struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        enum mlxsw_sp_mr_route_action route_action;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u16 rifi;

        /* If the VIF being unresolved is not valid, there is no eRIF to
         * delete.
         */
        if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
                return;

        /* Update the route action: if there is only one valid eVIF in the
         * route, set the action to trap, as the VIF deletion will lead to
         * zero valid eVIFs. In any other case, use mlxsw_sp_mr_route_action()
         * to determine the route action.
         */
        if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
                route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
        else
                route_action = mlxsw_sp_mr_route_action(rve->mr_route);
        if (route_action != rve->mr_route->route_action)
                mr->mr_ops->route_action_update(mlxsw_sp,
                                                rve->mr_route->route_priv,
                                                route_action);

        /* Delete the eRIF from the route */
        rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
        mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
        rve->mr_route->route_action = route_action;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

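/* Bind a VIF to a netdevice and RIF and re-resolve every route that uses
 * it as an iVIF or eVIF. On failure, routes that were already resolved are
 * rolled back in reverse order.
 */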
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
                                   struct net_device *dev,
                                   struct mlxsw_sp_mr_vif *mr_vif,
                                   unsigned long vif_flags,
                                   const struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
        int err;

        /* Update the VIF */
        mr_vif->dev = dev;
        mr_vif->rif = rif;
        mr_vif->vif_flags = vif_flags;

        /* Update all routes where this VIF is used as an unresolved iRIF */
        list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
                err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
                if (err)
                        goto err_irif_unresolve;
        }

        /* Update all routes where this VIF is used as an unresolved eRIF */
        list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
                err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
                if (err)
                        goto err_erif_unresolve;
        }
        return 0;

err_erif_unresolve:
        list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
                                         vif_node)
                mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
        list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
                                         vif_node)
                mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
        mr_vif->rif = NULL;
        return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
                                      struct net_device *dev,
                                      struct mlxsw_sp_mr_vif *mr_vif)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;

        /* Update all routes where this VIF is used as an eRIF */
        list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
                mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

        /* Update all routes where this VIF is used as an iRIF */
        list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
                mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

        /* Update the VIF */
        mr_vif->dev = dev;
        mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
                        struct net_device *dev, vifi_t vif_index,
                        unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_mr_vif *mr_vif;

        if (WARN_ON(vif_index >= MAXVIFS))
                return -EINVAL;
        mr_vif = &mr_table->vifs[vif_index];
        if (mr_vif->dev)
                return -EEXIST;
        return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
        struct mlxsw_sp_mr_vif *mr_vif;

        if (WARN_ON(vif_index >= MAXVIFS))
                return;
        mr_vif = &mr_table->vifs[vif_index];
        if (WARN_ON(!mr_vif->dev))
                return;
        mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}

static struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
                           const struct net_device *dev)
{
        vifi_t vif_index;

        for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
                if (mr_table->vifs[vif_index].dev == dev)
                        return &mr_table->vifs[vif_index];
        return NULL;
}

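/* RIF notifications from the router code: when a RIF is created or
 * destroyed for a netdevice that backs a VIF, resolve or unresolve that
 * VIF accordingly.
 */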
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
                        const struct mlxsw_sp_rif *rif)
{
        const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
        struct mlxsw_sp_mr_vif *mr_vif;

        if (!rif_dev)
                return 0;

        mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
        if (!mr_vif)
                return 0;
        return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
                                       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
                         const struct mlxsw_sp_rif *rif)
{
        const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
        struct mlxsw_sp_mr_vif *mr_vif;

        if (!rif_dev)
                return;

        mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
        if (!mr_vif)
                return;
        mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
                                const struct mlxsw_sp_rif *rif, int mtu)
{
        const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr_route_vif_entry *rve;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        struct mlxsw_sp_mr_vif *mr_vif;

        if (!rif_dev)
                return;

        /* Search for a VIF that uses that RIF */
        mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
        if (!mr_vif)
                return;

        /* Update all the routes that use that VIF as an eVIF */
        list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
                if (mtu < rve->mr_route->min_mtu) {
                        rve->mr_route->min_mtu = mtu;
                        mr->mr_ops->route_min_mtu_update(mlxsw_sp,
                                                         rve->mr_route->route_priv,
                                                         mtu);
                }
        }
}

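/* Create a per-VRF multicast routing table. A lowest-priority catch-all
 * route is installed first, so any multicast packet that misses all
 * offloaded routes in this VRF is trapped to the CPU.
 */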
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
                                                   u32 vr_id,
                                                   enum mlxsw_sp_l3proto proto)
{
        struct mlxsw_sp_mr_route_params catchall_route_params = {
                .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
                .key = {
                        .vrid = vr_id,
                },
                .value = {
                        .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
                }
        };
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        struct mlxsw_sp_mr_table *mr_table;
        int err;
        int i;

        mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
                           GFP_KERNEL);
        if (!mr_table)
                return ERR_PTR(-ENOMEM);

        mr_table->vr_id = vr_id;
        mr_table->mlxsw_sp = mlxsw_sp;
        mr_table->proto = proto;
        INIT_LIST_HEAD(&mr_table->route_list);

        err = rhashtable_init(&mr_table->route_ht,
                              &mlxsw_sp_mr_route_ht_params);
        if (err)
                goto err_route_rhashtable_init;

        for (i = 0; i < MAXVIFS; i++) {
                INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
                INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
        }

        err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
                                       mr_table->catchall_route_priv,
                                       &catchall_route_params);
        if (err)
                goto err_ops_route_create;
        list_add_tail(&mr_table->node, &mr->table_list);
        return mr_table;

err_ops_route_create:
        rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
        kfree(mr_table);
        return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
        list_del(&mr_table->node);
        mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
                                  &mr_table->catchall_route_priv);
        rhashtable_destroy(&mr_table->route_ht);
        kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
        struct mlxsw_sp_mr_route *mr_route, *tmp;
        int i;

        list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
                __mlxsw_sp_mr_route_del(mr_table, mr_route);

        for (i = 0; i < MAXVIFS; i++) {
                mr_table->vifs[i].dev = NULL;
                mr_table->vifs[i].rif = NULL;
        }
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
        int i;

        for (i = 0; i < MAXVIFS; i++)
                if (mr_table->vifs[i].dev)
                        return false;
        return list_empty(&mr_table->route_list);
}

static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
                                           struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u64 packets, bytes;

        if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
                return;

        mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
                                &bytes);

        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                if (mr_route->mfc4->mfc_un.res.pkt != packets)
                        mr_route->mfc4->mfc_un.res.lastuse = jiffies;
                mr_route->mfc4->mfc_un.res.pkt = packets;
                mr_route->mfc4->mfc_un.res.bytes = bytes;
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
}

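/* Periodic work that copies the hardware packet and byte counters back
 * into the kernel MFC entries, so that per-route statistics (e.g. as shown
 * by "ip -s mroute") keep working for offloaded routes.
 */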
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
        struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
                                              stats_update_dw.work);
        struct mlxsw_sp_mr_table *mr_table;
        struct mlxsw_sp_mr_route *mr_route;
        unsigned long interval;

        rtnl_lock();
        list_for_each_entry(mr_table, &mr->table_list, node)
                list_for_each_entry(mr_route, &mr_table->route_list, node)
                        mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
                                                       mr_route);
        rtnl_unlock();

        interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
        mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

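/* Initialize the multicast routing offload engine. A minimal usage sketch
 * (assuming the mr_ops implementation provided by spectrum_mr_tcam.c):
 *
 *        err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
 *        if (err)
 *                return err;
 *        ...
 *        mlxsw_sp_mr_fini(mlxsw_sp);
 */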
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
                     const struct mlxsw_sp_mr_ops *mr_ops)
{
        struct mlxsw_sp_mr *mr;
        unsigned long interval;
        int err;

        mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
        if (!mr)
                return -ENOMEM;
        mr->mr_ops = mr_ops;
        mlxsw_sp->mr = mr;
        INIT_LIST_HEAD(&mr->table_list);

        err = mr_ops->init(mlxsw_sp, mr->priv);
        if (err)
                goto err;

        /* Create the delayed work for counter updates */
        INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
        interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
        mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
        return 0;
err:
        kfree(mr);
        return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        cancel_delayed_work_sync(&mr->stats_update_dw);
        mr->mr_ops->fini(mr->priv);
        kfree(mr);
}