linux/drivers/net/ethernet/mellanox/mlx4/mcg.c
   1/*
   2 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
   4 *
   5 * This software is available to you under a choice of one of two
   6 * licenses.  You may choose to be licensed under the terms of the GNU
   7 * General Public License (GPL) Version 2, available from the file
   8 * COPYING in the main directory of this source tree, or the
   9 * OpenIB.org BSD license below:
  10 *
  11 *     Redistribution and use in source and binary forms, with or
  12 *     without modification, are permitted provided that the following
  13 *     conditions are met:
  14 *
  15 *      - Redistributions of source code must retain the above
  16 *        copyright notice, this list of conditions and the following
  17 *        disclaimer.
  18 *
  19 *      - Redistributions in binary form must reproduce the above
  20 *        copyright notice, this list of conditions and the following
  21 *        disclaimer in the documentation and/or other materials
  22 *        provided with the distribution.
  23 *
  24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31 * SOFTWARE.
  32 */
  33
  34#include <linux/string.h>
  35#include <linux/etherdevice.h>
  36
  37#include <linux/mlx4/cmd.h>
  38#include <linux/mlx4/qp.h>
  39#include <linux/export.h>
  40
  41#include "mlx4.h"
  42
  43int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
  44{
  45        return 1 << dev->oper_log_mgm_entry_size;
  46}
  47
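/* Each MGM entry starts with a 32-byte header (two 16-byte lines, including
 * the 16-byte GID); the rest of the entry holds 4-byte QPNs, four per
 * 16-byte line -- hence 4 * (entry_size / 16 - 2) below.
 */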
  48int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
  49{
  50        return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
  51}
  52
  53static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
  54                                        struct mlx4_cmd_mailbox *mailbox,
  55                                        u32 size,
  56                                        u64 *reg_id)
  57{
  58        u64 imm;
  59        int err = 0;
  60
  61        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
  62                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
  63                           MLX4_CMD_NATIVE);
  64        if (err)
  65                return err;
  66        *reg_id = imm;
  67
  68        return err;
  69}
  70
  71static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
  72{
  73        int err = 0;
  74
  75        err = mlx4_cmd(dev, regid, 0, 0,
  76                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
  77                       MLX4_CMD_NATIVE);
  78
  79        return err;
  80}
  81
  82static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
  83                           struct mlx4_cmd_mailbox *mailbox)
  84{
  85        return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
  86                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  87}
  88
  89static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
  90                            struct mlx4_cmd_mailbox *mailbox)
  91{
  92        return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
  93                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  94}
  95
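/* Write the per-port default (promiscuous) steering entry.  The input
 * modifier packs the port and steering type (port << 16 | steer << 1), and
 * op_mod 0x1 distinguishes this default-entry write from the indexed
 * WRITE_MCG issued by mlx4_WRITE_ENTRY() above.
 */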
  96static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
  97                              struct mlx4_cmd_mailbox *mailbox)
  98{
  99        u32 in_mod;
 100
 101        in_mod = (u32) port << 16 | steer << 1;
 102        return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
 103                        MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
 104                        MLX4_CMD_NATIVE);
 105}
 106
 107static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 108                         u16 *hash, u8 op_mod)
 109{
 110        u64 imm;
 111        int err;
 112
 113        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
 114                           MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
 115                           MLX4_CMD_NATIVE);
 116
 117        if (!err)
 118                *hash = imm;
 119
 120        return err;
 121}
 122
 123static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
 124                                              enum mlx4_steer_type steer,
 125                                              u32 qpn)
 126{
 127        struct mlx4_steer *s_steer;
 128        struct mlx4_promisc_qp *pqp;
 129
 130        if (port < 1 || port > dev->caps.num_ports)
 131                return NULL;
 132
 133        s_steer = &mlx4_priv(dev)->steer[port - 1];
 134
 135        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
 136                if (pqp->qpn == qpn)
 137                        return pqp;
 138        }
 139        /* not found */
 140        return NULL;
 141}
 142
 143/*
 144 * Add new entry to steering data structure.
 145 * All promisc QPs should be added as well
 146 */
 147static int new_steering_entry(struct mlx4_dev *dev, u8 port,
 148                              enum mlx4_steer_type steer,
 149                              unsigned int index, u32 qpn)
 150{
 151        struct mlx4_steer *s_steer;
 152        struct mlx4_cmd_mailbox *mailbox;
 153        struct mlx4_mgm *mgm;
 154        u32 members_count;
 155        struct mlx4_steer_index *new_entry;
 156        struct mlx4_promisc_qp *pqp;
 157        struct mlx4_promisc_qp *dqp = NULL;
 158        u32 prot;
 159        int err;
 160
 161        if (port < 1 || port > dev->caps.num_ports)
 162                return -EINVAL;
 163
 164        s_steer = &mlx4_priv(dev)->steer[port - 1];
 165        new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
 166        if (!new_entry)
 167                return -ENOMEM;
 168
 169        INIT_LIST_HEAD(&new_entry->duplicates);
 170        new_entry->index = index;
 171        list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
 172
  173        /* If the given qpn is also a promisc qp,
  174         * it should be inserted into the duplicates list.
  175         */
 176        pqp = get_promisc_qp(dev, port, steer, qpn);
 177        if (pqp) {
 178                dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
 179                if (!dqp) {
 180                        err = -ENOMEM;
 181                        goto out_alloc;
 182                }
 183                dqp->qpn = qpn;
 184                list_add_tail(&dqp->list, &new_entry->duplicates);
 185        }
 186
 187        /* if no promisc qps for this vep, we are done */
 188        if (list_empty(&s_steer->promisc_qps[steer]))
 189                return 0;
 190
 191        /* now need to add all the promisc qps to the new
 192         * steering entry, as they should also receive the packets
 193         * destined to this address */
 194        mailbox = mlx4_alloc_cmd_mailbox(dev);
 195        if (IS_ERR(mailbox)) {
 196                err = -ENOMEM;
 197                goto out_alloc;
 198        }
 199        mgm = mailbox->buf;
 200
 201        err = mlx4_READ_ENTRY(dev, index, mailbox);
 202        if (err)
 203                goto out_mailbox;
 204
 205        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 206        prot = be32_to_cpu(mgm->members_count) >> 30;
 207        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
 208                /* don't add already existing qpn */
 209                if (pqp->qpn == qpn)
 210                        continue;
 211                if (members_count == dev->caps.num_qp_per_mgm) {
 212                        /* out of space */
 213                        err = -ENOMEM;
 214                        goto out_mailbox;
 215                }
 216
 217                /* add the qpn */
 218                mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
 219        }
  220        /* update the qp count and update the entry with all the promisc qps */
 221        mgm->members_count = cpu_to_be32(members_count | (prot << 30));
 222        err = mlx4_WRITE_ENTRY(dev, index, mailbox);
 223
 224out_mailbox:
 225        mlx4_free_cmd_mailbox(dev, mailbox);
 226        if (!err)
 227                return 0;
 228out_alloc:
 229        if (dqp) {
 230                list_del(&dqp->list);
 231                kfree(dqp);
 232        }
 233        list_del(&new_entry->list);
 234        kfree(new_entry);
 235        return err;
 236}
 237
 238/* update the data structures with existing steering entry */
 239static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
 240                                   enum mlx4_steer_type steer,
 241                                   unsigned int index, u32 qpn)
 242{
 243        struct mlx4_steer *s_steer;
 244        struct mlx4_steer_index *tmp_entry, *entry = NULL;
 245        struct mlx4_promisc_qp *pqp;
 246        struct mlx4_promisc_qp *dqp;
 247
 248        if (port < 1 || port > dev->caps.num_ports)
 249                return -EINVAL;
 250
 251        s_steer = &mlx4_priv(dev)->steer[port - 1];
 252
 253        pqp = get_promisc_qp(dev, port, steer, qpn);
 254        if (!pqp)
 255                return 0; /* nothing to do */
 256
 257        list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
 258                if (tmp_entry->index == index) {
 259                        entry = tmp_entry;
 260                        break;
 261                }
 262        }
 263        if (unlikely(!entry)) {
 264                mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
 265                return -EINVAL;
 266        }
 267
  268        /* The given qpn is listed as a promisc qpn;
  269         * we need to add it as a duplicate to this entry
  270         * for future reference. */
 271        list_for_each_entry(dqp, &entry->duplicates, list) {
 272                if (qpn == dqp->qpn)
 273                        return 0; /* qp is already duplicated */
 274        }
 275
 276        /* add the qp as a duplicate on this index */
 277        dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
 278        if (!dqp)
 279                return -ENOMEM;
 280        dqp->qpn = qpn;
 281        list_add_tail(&dqp->list, &entry->duplicates);
 282
 283        return 0;
 284}
 285
 286/* Check whether a qpn is a duplicate on steering entry
 287 * If so, it should not be removed from mgm */
 288static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
 289                                  enum mlx4_steer_type steer,
 290                                  unsigned int index, u32 qpn)
 291{
 292        struct mlx4_steer *s_steer;
 293        struct mlx4_steer_index *tmp_entry, *entry = NULL;
 294        struct mlx4_promisc_qp *dqp, *tmp_dqp;
 295
 296        if (port < 1 || port > dev->caps.num_ports)
  297                return false;
 298
 299        s_steer = &mlx4_priv(dev)->steer[port - 1];
 300
 301        /* if qp is not promisc, it cannot be duplicated */
 302        if (!get_promisc_qp(dev, port, steer, qpn))
 303                return false;
 304
  305        /* The qp is a promisc qp, so it is a duplicate on this index.
  306         * Find the index entry and remove the duplicate. */
 307        list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
 308                if (tmp_entry->index == index) {
 309                        entry = tmp_entry;
 310                        break;
 311                }
 312        }
 313        if (unlikely(!entry)) {
 314                mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
 315                return false;
 316        }
 317        list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
 318                if (dqp->qpn == qpn) {
 319                        list_del(&dqp->list);
 320                        kfree(dqp);
 321                }
 322        }
 323        return true;
 324}
 325
 326/* Returns true if all the QPs != tqpn contained in this entry
 327 * are Promisc QPs. Returns false otherwise.
 328 */
 329static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
 330                                   enum mlx4_steer_type steer,
 331                                   unsigned int index, u32 tqpn,
 332                                   u32 *members_count)
 333{
 334        struct mlx4_cmd_mailbox *mailbox;
 335        struct mlx4_mgm *mgm;
 336        u32 m_count;
 337        bool ret = false;
 338        int i;
 339
 340        if (port < 1 || port > dev->caps.num_ports)
 341                return false;
 342
 343        mailbox = mlx4_alloc_cmd_mailbox(dev);
 344        if (IS_ERR(mailbox))
 345                return false;
 346        mgm = mailbox->buf;
 347
 348        if (mlx4_READ_ENTRY(dev, index, mailbox))
 349                goto out;
 350        m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 351        if (members_count)
 352                *members_count = m_count;
 353
 354        for (i = 0;  i < m_count; i++) {
 355                u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
 356                if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
 357                        /* the qp is not promisc, the entry can't be removed */
 358                        goto out;
 359                }
 360        }
 361        ret = true;
 362out:
 363        mlx4_free_cmd_mailbox(dev, mailbox);
 364        return ret;
 365}
 366
  367/* If a steering entry contains only promisc QPs, it can be removed. */
 368static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
 369                                      enum mlx4_steer_type steer,
 370                                      unsigned int index, u32 tqpn)
 371{
 372        struct mlx4_steer *s_steer;
 373        struct mlx4_steer_index *entry = NULL, *tmp_entry;
 374        u32 members_count;
 375        bool ret = false;
 376
 377        if (port < 1 || port > dev->caps.num_ports)
  378                return false;
 379
 380        s_steer = &mlx4_priv(dev)->steer[port - 1];
 381
 382        if (!promisc_steering_entry(dev, port, steer, index,
 383                                    tqpn, &members_count))
 384                goto out;
 385
  386        /* All the qps currently registered for this entry are promiscuous;
  387         * check for duplicates. */
 388        ret = true;
 389        list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
 390                if (entry->index == index) {
 391                        if (list_empty(&entry->duplicates) ||
 392                            members_count == 1) {
 393                                struct mlx4_promisc_qp *pqp, *tmp_pqp;
  394                                /* The duplicates list is empty or only a
  395                                 * single QP is left in the MGM: remove the
  396                                 * steering entry and free its duplicates.
  397                                 */
 398                                list_del(&entry->list);
 399                                list_for_each_entry_safe(pqp, tmp_pqp,
 400                                                         &entry->duplicates,
 401                                                         list) {
 402                                        list_del(&pqp->list);
 403                                        kfree(pqp);
 404                                }
 405                                kfree(entry);
 406                        } else {
 407                                /* This entry contains duplicates so it shouldn't be removed */
 408                                ret = false;
 409                                goto out;
 410                        }
 411                }
 412        }
 413
 414out:
 415        return ret;
 416}
 417
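/* Make @qpn promiscuous for (@port, @steer): add it to every existing
 * steering entry of that type (or record it as a duplicate if it is already
 * a member), append it to the per-port promisc_qps list and rewrite the
 * default entry so the QP also receives unmatched traffic.  Serialized by
 * priv->mcg_table.mutex, which is taken here.
 */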
 418static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
 419                          enum mlx4_steer_type steer, u32 qpn)
 420{
 421        struct mlx4_steer *s_steer;
 422        struct mlx4_cmd_mailbox *mailbox;
 423        struct mlx4_mgm *mgm;
 424        struct mlx4_steer_index *entry;
 425        struct mlx4_promisc_qp *pqp;
 426        struct mlx4_promisc_qp *dqp;
 427        u32 members_count;
 428        u32 prot;
 429        int i;
 430        bool found;
 431        int err;
 432        struct mlx4_priv *priv = mlx4_priv(dev);
 433
 434        if (port < 1 || port > dev->caps.num_ports)
 435                return -EINVAL;
 436
 437        s_steer = &mlx4_priv(dev)->steer[port - 1];
 438
 439        mutex_lock(&priv->mcg_table.mutex);
 440
 441        if (get_promisc_qp(dev, port, steer, qpn)) {
  442                err = 0;  /* Nothing to do, already exists */
 443                goto out_mutex;
 444        }
 445
 446        pqp = kmalloc(sizeof(*pqp), GFP_KERNEL);
 447        if (!pqp) {
 448                err = -ENOMEM;
 449                goto out_mutex;
 450        }
 451        pqp->qpn = qpn;
 452
 453        mailbox = mlx4_alloc_cmd_mailbox(dev);
 454        if (IS_ERR(mailbox)) {
 455                err = -ENOMEM;
 456                goto out_alloc;
 457        }
 458        mgm = mailbox->buf;
 459
 460        if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
  461                /* The promisc QP needs to be added to each of the steering
  462                 * entries. If it is already a member of an entry, it is
  463                 * added as a duplicate for that entry instead.
  464                 */
 465                list_for_each_entry(entry,
 466                                    &s_steer->steer_entries[steer],
 467                                    list) {
 468                        err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
 469                        if (err)
 470                                goto out_mailbox;
 471
 472                        members_count = be32_to_cpu(mgm->members_count) &
 473                                        0xffffff;
 474                        prot = be32_to_cpu(mgm->members_count) >> 30;
 475                        found = false;
 476                        for (i = 0; i < members_count; i++) {
 477                                if ((be32_to_cpu(mgm->qp[i]) &
 478                                     MGM_QPN_MASK) == qpn) {
 479                                        /* Entry already exists.
 480                                         * Add to duplicates.
 481                                         */
 482                                        dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
 483                                        if (!dqp) {
 484                                                err = -ENOMEM;
 485                                                goto out_mailbox;
 486                                        }
 487                                        dqp->qpn = qpn;
 488                                        list_add_tail(&dqp->list,
 489                                                      &entry->duplicates);
 490                                        found = true;
 491                                }
 492                        }
 493                        if (!found) {
 494                                /* Need to add the qpn to mgm */
 495                                if (members_count ==
 496                                    dev->caps.num_qp_per_mgm) {
 497                                        /* entry is full */
 498                                        err = -ENOMEM;
 499                                        goto out_mailbox;
 500                                }
 501                                mgm->qp[members_count++] =
 502                                        cpu_to_be32(qpn & MGM_QPN_MASK);
 503                                mgm->members_count =
 504                                        cpu_to_be32(members_count |
 505                                                    (prot << 30));
 506                                err = mlx4_WRITE_ENTRY(dev, entry->index,
 507                                                       mailbox);
 508                                if (err)
 509                                        goto out_mailbox;
 510                        }
 511                }
 512        }
 513
 514        /* add the new qpn to list of promisc qps */
 515        list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
 516        /* now need to add all the promisc qps to default entry */
 517        memset(mgm, 0, sizeof(*mgm));
 518        members_count = 0;
 519        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
 520                if (members_count == dev->caps.num_qp_per_mgm) {
 521                        /* entry is full */
 522                        err = -ENOMEM;
 523                        goto out_list;
 524                }
 525                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
 526        }
 527        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 528
 529        err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
 530        if (err)
 531                goto out_list;
 532
 533        mlx4_free_cmd_mailbox(dev, mailbox);
 534        mutex_unlock(&priv->mcg_table.mutex);
 535        return 0;
 536
 537out_list:
 538        list_del(&pqp->list);
 539out_mailbox:
 540        mlx4_free_cmd_mailbox(dev, mailbox);
 541out_alloc:
 542        kfree(pqp);
 543out_mutex:
 544        mutex_unlock(&priv->mcg_table.mutex);
 545        return err;
 546}
 547
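/* Reverse of add_promisc_qp(): drop @qpn from the per-port promisc_qps list,
 * rewrite the default entry without it and walk the steering entries -- if
 * the QP was also attached in its own right (it appears in the duplicates
 * list) only the duplicate marker is dropped, otherwise it is taken out of
 * the MGM itself.  Serialized by priv->mcg_table.mutex.
 */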
 548static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
 549                             enum mlx4_steer_type steer, u32 qpn)
 550{
 551        struct mlx4_priv *priv = mlx4_priv(dev);
 552        struct mlx4_steer *s_steer;
 553        struct mlx4_cmd_mailbox *mailbox;
 554        struct mlx4_mgm *mgm;
 555        struct mlx4_steer_index *entry, *tmp_entry;
 556        struct mlx4_promisc_qp *pqp;
 557        struct mlx4_promisc_qp *dqp;
 558        u32 members_count;
 559        bool found;
 560        bool back_to_list = false;
 561        int i;
 562        int err;
 563
 564        if (port < 1 || port > dev->caps.num_ports)
 565                return -EINVAL;
 566
 567        s_steer = &mlx4_priv(dev)->steer[port - 1];
 568        mutex_lock(&priv->mcg_table.mutex);
 569
 570        pqp = get_promisc_qp(dev, port, steer, qpn);
 571        if (unlikely(!pqp)) {
  572                mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
 573                /* nothing to do */
 574                err = 0;
 575                goto out_mutex;
 576        }
 577
  578        /* remove from list of promisc qps */
 579        list_del(&pqp->list);
 580
 581        /* set the default entry not to include the removed one */
 582        mailbox = mlx4_alloc_cmd_mailbox(dev);
 583        if (IS_ERR(mailbox)) {
 584                err = -ENOMEM;
 585                back_to_list = true;
 586                goto out_list;
 587        }
 588        mgm = mailbox->buf;
 589        members_count = 0;
 590        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
 591                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
 592        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 593
 594        err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
 595        if (err)
 596                goto out_mailbox;
 597
 598        if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
 599                /* Remove the QP from all the steering entries */
 600                list_for_each_entry_safe(entry, tmp_entry,
 601                                         &s_steer->steer_entries[steer],
 602                                         list) {
 603                        found = false;
 604                        list_for_each_entry(dqp, &entry->duplicates, list) {
 605                                if (dqp->qpn == qpn) {
 606                                        found = true;
 607                                        break;
 608                                }
 609                        }
 610                        if (found) {
 611                                /* A duplicate, no need to change the MGM,
 612                                 * only update the duplicates list
 613                                 */
 614                                list_del(&dqp->list);
 615                                kfree(dqp);
 616                        } else {
 617                                int loc = -1;
 618
 619                                err = mlx4_READ_ENTRY(dev,
 620                                                      entry->index,
 621                                                      mailbox);
 622                                if (err)
 623                                        goto out_mailbox;
 624                                members_count =
 625                                        be32_to_cpu(mgm->members_count) &
 626                                        0xffffff;
 627                                if (!members_count) {
 628                                        mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n",
 629                                                  qpn, entry->index);
 630                                        list_del(&entry->list);
 631                                        kfree(entry);
 632                                        continue;
 633                                }
 634
 635                                for (i = 0; i < members_count; ++i)
 636                                        if ((be32_to_cpu(mgm->qp[i]) &
 637                                             MGM_QPN_MASK) == qpn) {
 638                                                loc = i;
 639                                                break;
 640                                        }
 641
 642                                if (loc < 0) {
 643                                        mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
 644                                                 qpn, entry->index);
 645                                        err = -EINVAL;
 646                                        goto out_mailbox;
 647                                }
 648
 649                                /* Copy the last QP in this MGM
 650                                 * over removed QP
 651                                 */
 652                                mgm->qp[loc] = mgm->qp[members_count - 1];
 653                                mgm->qp[members_count - 1] = 0;
 654                                mgm->members_count =
 655                                        cpu_to_be32(--members_count |
 656                                                    (MLX4_PROT_ETH << 30));
 657
 658                                err = mlx4_WRITE_ENTRY(dev,
 659                                                       entry->index,
 660                                                       mailbox);
 661                                if (err)
 662                                        goto out_mailbox;
 663                        }
 664                }
 665        }
 666
 667out_mailbox:
 668        mlx4_free_cmd_mailbox(dev, mailbox);
 669out_list:
 670        if (back_to_list)
 671                list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
 672        else
 673                kfree(pqp);
 674out_mutex:
 675        mutex_unlock(&priv->mcg_table.mutex);
 676        return err;
 677}
 678
 679/*
  680 * Caller must hold the MCG table mutex.  gid and mgm parameters must
 681 * be properly aligned for command interface.
 682 *
 683 *  Returns 0 unless a firmware command error occurs.
 684 *
 685 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 686 * and *mgm holds MGM entry.
 687 *
  688 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 689 * previous entry in hash chain and *mgm holds AMGM entry.
 690 *
 691 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 692 * entry in hash chain and *mgm holds end of hash chain.
 693 */
 694static int find_entry(struct mlx4_dev *dev, u8 port,
 695                      u8 *gid, enum mlx4_protocol prot,
 696                      struct mlx4_cmd_mailbox *mgm_mailbox,
 697                      int *prev, int *index)
 698{
 699        struct mlx4_cmd_mailbox *mailbox;
 700        struct mlx4_mgm *mgm = mgm_mailbox->buf;
 701        u8 *mgid;
 702        int err;
 703        u16 hash;
 704        u8 op_mod = (prot == MLX4_PROT_ETH) ?
 705                !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
 706
 707        mailbox = mlx4_alloc_cmd_mailbox(dev);
 708        if (IS_ERR(mailbox))
 709                return -ENOMEM;
 710        mgid = mailbox->buf;
 711
 712        memcpy(mgid, gid, 16);
 713
 714        err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
 715        mlx4_free_cmd_mailbox(dev, mailbox);
 716        if (err)
 717                return err;
 718
 719        if (0)
 720                mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);
 721
 722        *index = hash;
 723        *prev  = -1;
 724
 725        do {
 726                err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
 727                if (err)
 728                        return err;
 729
 730                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
 731                        if (*index != hash) {
 732                                mlx4_err(dev, "Found zero MGID in AMGM\n");
 733                                err = -EINVAL;
 734                        }
 735                        return err;
 736                }
 737
 738                if (!memcmp(mgm->gid, gid, 16) &&
 739                    be32_to_cpu(mgm->members_count) >> 30 == prot)
 740                        return err;
 741
 742                *prev = *index;
 743                *index = be32_to_cpu(mgm->next_gid_index) >> 6;
 744        } while (*index);
 745
 746        *index = -1;
 747        return err;
 748}
 749
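/* Map the software mlx4_net_trans_promisc_mode values to the hardware rule
 * type codes written into mlx4_net_trans_rule_hw_ctrl::type.
 */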
 750static const u8 __promisc_mode[] = {
 751        [MLX4_FS_REGULAR]   = 0x0,
 752        [MLX4_FS_ALL_DEFAULT] = 0x1,
 753        [MLX4_FS_MC_DEFAULT] = 0x3,
 754        [MLX4_FS_MIRROR_RX_PORT] = 0x4,
 755        [MLX4_FS_MIRROR_SX_PORT] = 0x5,
 756        [MLX4_FS_UC_SNIFFER] = 0x6,
 757        [MLX4_FS_MC_SNIFFER] = 0x7,
 758};
 759
 760int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
 761                                    enum mlx4_net_trans_promisc_mode flow_type)
 762{
 763        if (flow_type >= MLX4_FS_MODE_NUM) {
 764                mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
 765                return -EINVAL;
 766        }
 767        return __promisc_mode[flow_type];
 768}
 769EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
 770
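/* Pack the rule control header: flags bit 0 = LIFO queue mode, bit 2 =
 * exclusive, bit 3 = allow loopback; the type byte comes from
 * __promisc_mode above.
 */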
 771static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 772                                  struct mlx4_net_trans_rule_hw_ctrl *hw)
 773{
 774        u8 flags = 0;
 775
 776        flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
 777        flags |= ctrl->exclusive ? (1 << 2) : 0;
 778        flags |= ctrl->allow_loopback ? (1 << 3) : 0;
 779
 780        hw->flags = flags;
 781        hw->type = __promisc_mode[ctrl->promisc_mode];
 782        hw->prio = cpu_to_be16(ctrl->priority);
 783        hw->port = ctrl->port;
 784        hw->qpn = cpu_to_be32(ctrl->qpn);
 785}
 786
 787const u16 __sw_id_hw[] = {
 788        [MLX4_NET_TRANS_RULE_ID_ETH]     = 0xE001,
 789        [MLX4_NET_TRANS_RULE_ID_IB]      = 0xE005,
 790        [MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
 791        [MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
 792        [MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
 793        [MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006,
 794        [MLX4_NET_TRANS_RULE_ID_VXLAN]   = 0xE008
 795};
 796
 797int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 798                                  enum mlx4_net_trans_rule_id id)
 799{
 800        if (id >= MLX4_NET_TRANS_RULE_NUM) {
 801                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 802                return -EINVAL;
 803        }
 804        return __sw_id_hw[id];
 805}
 806EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
 807
 808static const int __rule_hw_sz[] = {
 809        [MLX4_NET_TRANS_RULE_ID_ETH] =
 810                sizeof(struct mlx4_net_trans_rule_hw_eth),
 811        [MLX4_NET_TRANS_RULE_ID_IB] =
 812                sizeof(struct mlx4_net_trans_rule_hw_ib),
 813        [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
 814        [MLX4_NET_TRANS_RULE_ID_IPV4] =
 815                sizeof(struct mlx4_net_trans_rule_hw_ipv4),
 816        [MLX4_NET_TRANS_RULE_ID_TCP] =
 817                sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
 818        [MLX4_NET_TRANS_RULE_ID_UDP] =
 819                sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
 820        [MLX4_NET_TRANS_RULE_ID_VXLAN] =
 821                sizeof(struct mlx4_net_trans_rule_hw_vxlan)
 822};
 823
 824int mlx4_hw_rule_sz(struct mlx4_dev *dev,
 825               enum mlx4_net_trans_rule_id id)
 826{
 827        if (id >= MLX4_NET_TRANS_RULE_NUM) {
 828                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 829                return -EINVAL;
 830        }
 831
 832        return __rule_hw_sz[id];
 833}
 834EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
 835
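/* Translate one software spec (struct mlx4_spec_list) into its packed
 * hardware form at @rule_hw.  Returns the number of bytes written (also
 * stored in rule_hw->size as 4-byte units) or a negative errno for
 * unsupported spec ids.
 */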
 836static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 837                            struct _rule_hw *rule_hw)
 838{
 839        if (mlx4_hw_rule_sz(dev, spec->id) < 0)
 840                return -EINVAL;
 841        memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
 842        rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
 843        rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
 844
 845        switch (spec->id) {
 846        case MLX4_NET_TRANS_RULE_ID_ETH:
 847                memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
 848                memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
 849                       ETH_ALEN);
 850                memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
 851                memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
 852                       ETH_ALEN);
 853                if (spec->eth.ether_type_enable) {
 854                        rule_hw->eth.ether_type_enable = 1;
 855                        rule_hw->eth.ether_type = spec->eth.ether_type;
 856                }
 857                rule_hw->eth.vlan_tag = spec->eth.vlan_id;
 858                rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
 859                break;
 860
 861        case MLX4_NET_TRANS_RULE_ID_IB:
 862                rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
 863                rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
 864                memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
 865                memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
 866                break;
 867
 868        case MLX4_NET_TRANS_RULE_ID_IPV6:
 869                return -EOPNOTSUPP;
 870
 871        case MLX4_NET_TRANS_RULE_ID_IPV4:
 872                rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
 873                rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
 874                rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
 875                rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
 876                break;
 877
 878        case MLX4_NET_TRANS_RULE_ID_TCP:
 879        case MLX4_NET_TRANS_RULE_ID_UDP:
 880                rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
 881                rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
 882                rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
 883                rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
 884                break;
 885
 886        case MLX4_NET_TRANS_RULE_ID_VXLAN:
 887                rule_hw->vxlan.vni =
 888                        cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
 889                rule_hw->vxlan.vni_mask =
 890                        cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
 891                break;
 892
 893        default:
 894                return -EINVAL;
 895        }
 896
 897        return __rule_hw_sz[spec->id];
 898}
 899
 900static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
 901                          struct mlx4_net_trans_rule *rule)
 902{
 903#define BUF_SIZE 256
 904        struct mlx4_spec_list *cur;
 905        char buf[BUF_SIZE];
 906        int len = 0;
 907
 908        mlx4_err(dev, "%s", str);
 909        len += snprintf(buf + len, BUF_SIZE - len,
 910                        "port = %d prio = 0x%x qp = 0x%x ",
 911                        rule->port, rule->priority, rule->qpn);
 912
 913        list_for_each_entry(cur, &rule->list, list) {
 914                switch (cur->id) {
 915                case MLX4_NET_TRANS_RULE_ID_ETH:
 916                        len += snprintf(buf + len, BUF_SIZE - len,
 917                                        "dmac = %pM ", &cur->eth.dst_mac);
 918                        if (cur->eth.ether_type)
 919                                len += snprintf(buf + len, BUF_SIZE - len,
 920                                                "ethertype = 0x%x ",
 921                                                be16_to_cpu(cur->eth.ether_type));
 922                        if (cur->eth.vlan_id)
 923                                len += snprintf(buf + len, BUF_SIZE - len,
 924                                                "vlan-id = %d ",
 925                                                be16_to_cpu(cur->eth.vlan_id));
 926                        break;
 927
 928                case MLX4_NET_TRANS_RULE_ID_IPV4:
 929                        if (cur->ipv4.src_ip)
 930                                len += snprintf(buf + len, BUF_SIZE - len,
 931                                                "src-ip = %pI4 ",
 932                                                &cur->ipv4.src_ip);
 933                        if (cur->ipv4.dst_ip)
 934                                len += snprintf(buf + len, BUF_SIZE - len,
 935                                                "dst-ip = %pI4 ",
 936                                                &cur->ipv4.dst_ip);
 937                        break;
 938
 939                case MLX4_NET_TRANS_RULE_ID_TCP:
 940                case MLX4_NET_TRANS_RULE_ID_UDP:
 941                        if (cur->tcp_udp.src_port)
 942                                len += snprintf(buf + len, BUF_SIZE - len,
 943                                                "src-port = %d ",
 944                                                be16_to_cpu(cur->tcp_udp.src_port));
 945                        if (cur->tcp_udp.dst_port)
 946                                len += snprintf(buf + len, BUF_SIZE - len,
 947                                                "dst-port = %d ",
 948                                                be16_to_cpu(cur->tcp_udp.dst_port));
 949                        break;
 950
 951                case MLX4_NET_TRANS_RULE_ID_IB:
 952                        len += snprintf(buf + len, BUF_SIZE - len,
 953                                        "dst-gid = %pI6\n", cur->ib.dst_gid);
 954                        len += snprintf(buf + len, BUF_SIZE - len,
 955                                        "dst-gid-mask = %pI6\n",
 956                                        cur->ib.dst_gid_msk);
 957                        break;
 958
 959                case MLX4_NET_TRANS_RULE_ID_VXLAN:
 960                        len += snprintf(buf + len, BUF_SIZE - len,
 961                                        "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
 962                        break;
 963                case MLX4_NET_TRANS_RULE_ID_IPV6:
 964                        break;
 965
 966                default:
 967                        break;
 968                }
 969        }
 970        len += snprintf(buf + len, BUF_SIZE - len, "\n");
 971        mlx4_err(dev, "%s", buf);
 972
 973        if (len >= BUF_SIZE)
 974                mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
 975}
 976
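/* Build the flow-steering rule blob in a command mailbox -- the control
 * header produced by trans_rule_ctrl_to_hw() followed by each parsed spec --
 * and issue QP_FLOW_STEERING_ATTACH.  On success *reg_id holds the handle to
 * pass to mlx4_flow_detach() later.
 *
 * Usage sketch, modelled on mlx4_tunnel_steer_add() below; "dev", "qpn" and
 * "mac" are caller-supplied and purely illustrative:
 *
 *	struct mlx4_spec_list spec_eth = { {NULL} };
 *	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
 *		.exclusive	= 0,
 *		.allow_loopback	= 1,
 *		.promisc_mode	= MLX4_FS_REGULAR,
 *		.port		= 1,
 *		.priority	= MLX4_DOMAIN_NIC,
 *		.qpn		= qpn,
 *	};
 *	u64 reg_id;
 *	int err;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
 *	memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
 *	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
 *	list_add_tail(&spec_eth.list, &rule.list);
 *
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	...
 *	mlx4_flow_detach(dev, reg_id);
 */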
 977int mlx4_flow_attach(struct mlx4_dev *dev,
 978                     struct mlx4_net_trans_rule *rule, u64 *reg_id)
 979{
 980        struct mlx4_cmd_mailbox *mailbox;
 981        struct mlx4_spec_list *cur;
 982        u32 size = 0;
 983        int ret;
 984
 985        mailbox = mlx4_alloc_cmd_mailbox(dev);
 986        if (IS_ERR(mailbox))
 987                return PTR_ERR(mailbox);
 988
 989        if (!mlx4_qp_lookup(dev, rule->qpn)) {
 990                mlx4_err_rule(dev, "QP doesn't exist\n", rule);
 991                ret = -EINVAL;
 992                goto out;
 993        }
 994
 995        trans_rule_ctrl_to_hw(rule, mailbox->buf);
 996
 997        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
 998
 999        list_for_each_entry(cur, &rule->list, list) {
1000                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
1001                if (ret < 0)
1002                        goto out;
1003
1004                size += ret;
1005        }
1006
1007        ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
1008        if (ret == -ENOMEM) {
1009                mlx4_err_rule(dev,
 1010                              "mcg table is full. Failed to register network rule\n",
1011                              rule);
1012        } else if (ret) {
1013                if (ret == -ENXIO) {
1014                        if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
1015                                mlx4_err_rule(dev,
1016                                              "DMFS is not enabled, "
1017                                              "failed to register network rule.\n",
1018                                              rule);
1019                        else
1020                                mlx4_err_rule(dev,
1021                                              "Rule exceeds the dmfs_high_rate_mode limitations, "
1022                                              "failed to register network rule.\n",
1023                                              rule);
1024
1025                } else {
 1026                        mlx4_err_rule(dev, "Failed to register network rule.\n", rule);
1027                }
1028        }
1029
1030out:
1031        mlx4_free_cmd_mailbox(dev, mailbox);
1032
1033        return ret;
1034}
1035EXPORT_SYMBOL_GPL(mlx4_flow_attach);
1036
1037int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
1038{
1039        int err;
1040
1041        err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
1042        if (err)
 1043                mlx4_err(dev, "Failed to detach network rule. registration id = 0x%llx\n",
1044                         reg_id);
1045        return err;
1046}
1047EXPORT_SYMBOL_GPL(mlx4_flow_detach);
1048
1049int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
1050                          int port, int qpn, u16 prio, u64 *reg_id)
1051{
1052        int err;
1053        struct mlx4_spec_list spec_eth_outer = { {NULL} };
1054        struct mlx4_spec_list spec_vxlan     = { {NULL} };
1055        struct mlx4_spec_list spec_eth_inner = { {NULL} };
1056
1057        struct mlx4_net_trans_rule rule = {
1058                .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1059                .exclusive = 0,
1060                .allow_loopback = 1,
1061                .promisc_mode = MLX4_FS_REGULAR,
1062        };
1063
1064        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
1065
1066        rule.port = port;
1067        rule.qpn = qpn;
1068        rule.priority = prio;
1069        INIT_LIST_HEAD(&rule.list);
1070
1071        spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
1072        memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
1073        memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
1074
1075        spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
1076        spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
1077
1078        list_add_tail(&spec_eth_outer.list, &rule.list);
1079        list_add_tail(&spec_vxlan.list,     &rule.list);
1080        list_add_tail(&spec_eth_inner.list, &rule.list);
1081
1082        err = mlx4_flow_attach(dev, &rule, reg_id);
1083        return err;
1084}
1085EXPORT_SYMBOL(mlx4_tunnel_steer_add);
1086
1087int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
1088                                      u32 max_range_qpn)
1089{
1090        int err;
1091        u64 in_param;
1092
1093        in_param = ((u64) min_range_qpn) << 32;
1094        in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;
1095
1096        err = mlx4_cmd(dev, in_param, 0, 0,
1097                        MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1098                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1099
1100        return err;
1101}
1102EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
1103
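/* Attach @qp to the multicast group @gid through the MGM hash table: locate
 * or create the entry via find_entry(), linking a new AMGM entry into the
 * hash chain if needed, append the QPN (optionally with the block-loopback
 * bit set) and, for Ethernet, update the B0 steering bookkeeping.  gid[5]
 * carries the port number.
 */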
1104int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1105                          int block_mcast_loopback, enum mlx4_protocol prot,
1106                          enum mlx4_steer_type steer)
1107{
1108        struct mlx4_priv *priv = mlx4_priv(dev);
1109        struct mlx4_cmd_mailbox *mailbox;
1110        struct mlx4_mgm *mgm;
1111        u32 members_count;
1112        int index = -1, prev;
1113        int link = 0;
1114        int i;
1115        int err;
1116        u8 port = gid[5];
1117        u8 new_entry = 0;
1118
1119        mailbox = mlx4_alloc_cmd_mailbox(dev);
1120        if (IS_ERR(mailbox))
1121                return PTR_ERR(mailbox);
1122        mgm = mailbox->buf;
1123
1124        mutex_lock(&priv->mcg_table.mutex);
1125        err = find_entry(dev, port, gid, prot,
1126                         mailbox, &prev, &index);
1127        if (err)
1128                goto out;
1129
1130        if (index != -1) {
1131                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
1132                        new_entry = 1;
1133                        memcpy(mgm->gid, gid, 16);
1134                }
1135        } else {
1136                link = 1;
1137
1138                index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
1139                if (index == -1) {
1140                        mlx4_err(dev, "No AMGM entries left\n");
1141                        err = -ENOMEM;
1142                        goto out;
1143                }
1144                index += dev->caps.num_mgms;
1145
1146                new_entry = 1;
1147                memset(mgm, 0, sizeof(*mgm));
1148                memcpy(mgm->gid, gid, 16);
1149        }
1150
1151        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
1152        if (members_count == dev->caps.num_qp_per_mgm) {
1153                mlx4_err(dev, "MGM at index %x is full\n", index);
1154                err = -ENOMEM;
1155                goto out;
1156        }
1157
1158        for (i = 0; i < members_count; ++i)
1159                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
1160                        mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
1161                        err = 0;
1162                        goto out;
1163                }
1164
1165        if (block_mcast_loopback)
1166                mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
1167                                                       (1U << MGM_BLCK_LB_BIT));
1168        else
1169                mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
1170
1171        mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
1172
1173        err = mlx4_WRITE_ENTRY(dev, index, mailbox);
1174        if (err)
1175                goto out;
1176
1177        if (!link)
1178                goto out;
1179
1180        err = mlx4_READ_ENTRY(dev, prev, mailbox);
1181        if (err)
1182                goto out;
1183
1184        mgm->next_gid_index = cpu_to_be32(index << 6);
1185
1186        err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
1187        if (err)
1188                goto out;
1189
1190out:
1191        if (prot == MLX4_PROT_ETH && index != -1) {
1192                /* manage the steering entry for promisc mode */
1193                if (new_entry)
1194                        err = new_steering_entry(dev, port, steer,
1195                                                 index, qp->qpn);
1196                else
1197                        err = existing_steering_entry(dev, port, steer,
1198                                                      index, qp->qpn);
1199        }
1200        if (err && link && index != -1) {
1201                if (index < dev->caps.num_mgms)
1202                        mlx4_warn(dev, "Got AMGM index %d < %d\n",
1203                                  index, dev->caps.num_mgms);
1204                else
1205                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
1206                                         index - dev->caps.num_mgms, MLX4_USE_RR);
1207        }
1208        mutex_unlock(&priv->mcg_table.mutex);
1209
1210        mlx4_free_cmd_mailbox(dev, mailbox);
1211        return err;
1212}
1213
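/* Detach @qp from the multicast group @gid: remove the QPN from the MGM
 * entry found by find_entry() and, when the entry becomes empty (or, for
 * Ethernet, holds only promiscuous QPs), unlink it from the hash chain and
 * release its AMGM slot.  gid[5] carries the port number.
 */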
1214int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1215                          enum mlx4_protocol prot, enum mlx4_steer_type steer)
1216{
1217        struct mlx4_priv *priv = mlx4_priv(dev);
1218        struct mlx4_cmd_mailbox *mailbox;
1219        struct mlx4_mgm *mgm;
1220        u32 members_count;
1221        int prev, index;
1222        int i, loc = -1;
1223        int err;
1224        u8 port = gid[5];
1225        bool removed_entry = false;
1226
1227        mailbox = mlx4_alloc_cmd_mailbox(dev);
1228        if (IS_ERR(mailbox))
1229                return PTR_ERR(mailbox);
1230        mgm = mailbox->buf;
1231
1232        mutex_lock(&priv->mcg_table.mutex);
1233
1234        err = find_entry(dev, port, gid, prot,
1235                         mailbox, &prev, &index);
1236        if (err)
1237                goto out;
1238
1239        if (index == -1) {
1240                mlx4_err(dev, "MGID %pI6 not found\n", gid);
1241                err = -EINVAL;
1242                goto out;
1243        }
1244
 1245        /* If this QP is also a promisc QP, don't remove it from the MGM
 1246         * while at least one non-promisc QP is still attached to this MCG
 1247         */
1248        if (prot == MLX4_PROT_ETH &&
1249            check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
1250            !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
1251                        goto out;
1252
1253        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
1254        for (i = 0; i < members_count; ++i)
1255                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
1256                        loc = i;
1257                        break;
1258                }
1259
1260        if (loc == -1) {
1261                mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
1262                err = -EINVAL;
1263                goto out;
1264        }
1265
1266        /* copy the last QP in this MGM over removed QP */
1267        mgm->qp[loc] = mgm->qp[members_count - 1];
1268        mgm->qp[members_count - 1] = 0;
1269        mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
1270
1271        if (prot == MLX4_PROT_ETH)
1272                removed_entry = can_remove_steering_entry(dev, port, steer,
1273                                                                index, qp->qpn);
1274        if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
1275                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
1276                goto out;
1277        }
1278
1279        /* We are going to delete the entry, members count should be 0 */
1280        mgm->members_count = cpu_to_be32((u32) prot << 30);
1281
1282        if (prev == -1) {
1283                /* Remove entry from MGM */
1284                int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
1285                if (amgm_index) {
1286                        err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
1287                        if (err)
1288                                goto out;
1289                } else
1290                        memset(mgm->gid, 0, 16);
1291
1292                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
1293                if (err)
1294                        goto out;
1295
1296                if (amgm_index) {
1297                        if (amgm_index < dev->caps.num_mgms)
1298                                mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
1299                                          index, amgm_index, dev->caps.num_mgms);
1300                        else
1301                                mlx4_bitmap_free(&priv->mcg_table.bitmap,
1302                                                 amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
1303                }
1304        } else {
1305                /* Remove entry from AMGM */
1306                int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
1307                err = mlx4_READ_ENTRY(dev, prev, mailbox);
1308                if (err)
1309                        goto out;
1310
1311                mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
1312
1313                err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
1314                if (err)
1315                        goto out;
1316
1317                if (index < dev->caps.num_mgms)
1318                        mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
1319                                  prev, index, dev->caps.num_mgms);
1320                else
1321                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
1322                                         index - dev->caps.num_mgms, MLX4_USE_RR);
1323        }
1324
1325out:
1326        mutex_unlock(&priv->mcg_table.mutex);
1327
1328        mlx4_free_cmd_mailbox(dev, mailbox);
1329        if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 1330                /* If the device is in an internal error state, return success since this is a teardown command */
1331                err = 0;
1332        return err;
1333}
1334
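/* Multi-function path: issue the attach/detach as a wrapped QP_ATTACH
 * command so it can be mediated by the owner/PF.  The input modifier packs
 * qp->qpn with the protocol shifted to bit 28 and, on attach, the
 * block-loopback flag in bit 31.  Returns -EBADF on single-function devices.
 */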
1335static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
1336                          u8 gid[16], u8 attach, u8 block_loopback,
1337                          enum mlx4_protocol prot)
1338{
1339        struct mlx4_cmd_mailbox *mailbox;
1340        int err = 0;
1341        int qpn;
1342
1343        if (!mlx4_is_mfunc(dev))
1344                return -EBADF;
1345
1346        mailbox = mlx4_alloc_cmd_mailbox(dev);
1347        if (IS_ERR(mailbox))
1348                return PTR_ERR(mailbox);
1349
1350        memcpy(mailbox->buf, gid, 16);
1351        qpn = qp->qpn;
1352        qpn |= (prot << 28);
1353        if (attach && block_loopback)
1354                qpn |= (1 << 31);
1355
1356        err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
1357                       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
1358                       MLX4_CMD_WRAPPED);
1359
1360        mlx4_free_cmd_mailbox(dev, mailbox);
1361        if (err && !attach &&
1362            dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
1363                err = 0;
1364        return err;
1365}
1366
1367int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
1368                              u8 gid[16], u8 port,
1369                              int block_mcast_loopback,
1370                              enum mlx4_protocol prot, u64 *reg_id)
1371{
 1372        struct mlx4_spec_list spec = { {NULL} };
 1373        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 1374
 1375        struct mlx4_net_trans_rule rule = {
 1376                .queue_mode = MLX4_NET_TRANS_Q_FIFO,
 1377                .exclusive = 0,
 1378                .promisc_mode = MLX4_FS_REGULAR,
 1379                .priority = MLX4_DOMAIN_NIC,
 1380        };
 1381
 1382        rule.allow_loopback = !block_mcast_loopback;
 1383        rule.port = port;
 1384        rule.qpn = qp->qpn;
 1385        INIT_LIST_HEAD(&rule.list);
 1386
 1387        switch (prot) {
 1388        case MLX4_PROT_ETH:
 1389                spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
 1390                memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
 1391                memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
 1392                break;
 1393
 1394        case MLX4_PROT_IB_IPV6:
 1395                spec.id = MLX4_NET_TRANS_RULE_ID_IB;
 1396                memcpy(spec.ib.dst_gid, gid, 16);
 1397                memset(&spec.ib.dst_gid_msk, 0xff, 16);
 1398                break;
 1399        default:
 1400                return -EINVAL;
 1401        }
 1402        list_add_tail(&spec.list, &rule.list);
 1403
 1404        return mlx4_flow_attach(dev, &rule, reg_id);
1405}
1406
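    /*
     * Public multicast attach entry point - dispatch on the device's steering
     * mode.  A0 (port-based) steering has no per-QP Ethernet steering, so ETH
     * attaches are accepted as no-ops; IB falls through to the B0 path.  B0
     * tags the GID with MLX4_MC_STEER and uses either the wrapped command
     * (multi-function mode) or the common attach path.  Device-managed mode
     * builds a flow rule instead and returns a reg_id for the later detach.
     */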
1407int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1408                          u8 port, int block_mcast_loopback,
1409                          enum mlx4_protocol prot, u64 *reg_id)
1410{
1411        switch (dev->caps.steering_mode) {
1412        case MLX4_STEERING_MODE_A0:
1413                if (prot == MLX4_PROT_ETH)
1414                        return 0;
1415                /* fall through */
1416
1417        case MLX4_STEERING_MODE_B0:
1418                if (prot == MLX4_PROT_ETH)
1419                        gid[7] |= (MLX4_MC_STEER << 1);
1420
1421                if (mlx4_is_mfunc(dev))
1422                        return mlx4_QP_ATTACH(dev, qp, gid, 1,
1423                                              block_mcast_loopback, prot);
1424                return mlx4_qp_attach_common(dev, qp, gid,
1425                                             block_mcast_loopback, prot,
1426                                             MLX4_MC_STEER);
1427
1428        case MLX4_STEERING_MODE_DEVICE_MANAGED:
1429                return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
1430                                                 block_mcast_loopback,
1431                                                 prot, reg_id);
1432        default:
1433                return -EINVAL;
1434        }
1435}
1436EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
1437
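    /*
     * Mirror of mlx4_multicast_attach(): undo the attach using the same
     * steering-mode dispatch.  In device-managed mode the rule is identified
     * solely by the reg_id returned at attach time.
     */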
1438int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1439                          enum mlx4_protocol prot, u64 reg_id)
1440{
1441        switch (dev->caps.steering_mode) {
1442        case MLX4_STEERING_MODE_A0:
1443                if (prot == MLX4_PROT_ETH)
1444                        return 0;
1445                /* fall through */
1446
1447        case MLX4_STEERING_MODE_B0:
1448                if (prot == MLX4_PROT_ETH)
1449                        gid[7] |= (MLX4_MC_STEER << 1);
1450
1451                if (mlx4_is_mfunc(dev))
1452                        return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1453
1454                return mlx4_qp_detach_common(dev, qp, gid, prot,
1455                                             MLX4_MC_STEER);
1456
1457        case MLX4_STEERING_MODE_DEVICE_MANAGED:
1458                return mlx4_flow_detach(dev, reg_id);
1459
1460        default:
1461                return -EINVAL;
1462        }
1463}
1464EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
1465
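    /*
     * Install a catch-all DMFS rule on the port: MLX4_FS_ALL_DEFAULT captures
     * all traffic, MLX4_FS_MC_DEFAULT all multicast.  The registration ID is
     * remembered in the per-port array, so only one such rule per port and
     * mode can exist at a time.
     */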
1466int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1467                                u32 qpn, enum mlx4_net_trans_promisc_mode mode)
1468{
1469        struct mlx4_net_trans_rule rule = {
1470                .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1471                .exclusive = 0,
1472                .allow_loopback = 1,
1473        };
1474
1475        u64 *regid_p;
1476
1477        switch (mode) {
1478        case MLX4_FS_ALL_DEFAULT:
1479                regid_p = &dev->regid_promisc_array[port];
1480                break;
1481        case MLX4_FS_MC_DEFAULT:
1482                regid_p = &dev->regid_allmulti_array[port];
1483                break;
1484        default:
1485                return -1;
1486        }
1487
1488        if (*regid_p != 0)
1489                return -1;
1490
1491        rule.promisc_mode = mode;
1492        rule.port = port;
1493        rule.qpn = qpn;
1494        INIT_LIST_HEAD(&rule.list);
1495        mlx4_err(dev, "going promisc on %x\n", port);
1496
1497        return mlx4_flow_attach(dev, &rule, regid_p);
1498}
1499EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
1500
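    /*
     * Remove the catch-all rule installed by mlx4_flow_steer_promisc_add()
     * and clear the saved registration ID; fails if no such rule is present.
     */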
1501int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1502                                   enum mlx4_net_trans_promisc_mode mode)
1503{
1504        int ret;
1505        u64 *regid_p;
1506
1507        switch (mode) {
1508        case MLX4_FS_ALL_DEFAULT:
1509                regid_p = &dev->regid_promisc_array[port];
1510                break;
1511        case MLX4_FS_MC_DEFAULT:
1512                regid_p = &dev->regid_allmulti_array[port];
1513                break;
1514        default:
1515                return -1;
1516        }
1517
1518        if (*regid_p == 0)
1519                return -1;
1520
1521        ret = mlx4_flow_detach(dev, *regid_p);
1522        if (ret == 0)
1523                *regid_p = 0;
1524
1525        return ret;
1526}
1527EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
1528
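    /*
     * B0-mode unicast steering: tag the GID with MLX4_UC_STEER and attach the
     * QP, either through the wrapped command (multi-function mode) or
     * directly via the common attach path.
     */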
1529int mlx4_unicast_attach(struct mlx4_dev *dev,
1530                        struct mlx4_qp *qp, u8 gid[16],
1531                        int block_mcast_loopback, enum mlx4_protocol prot)
1532{
1533        if (prot == MLX4_PROT_ETH)
1534                gid[7] |= (MLX4_UC_STEER << 1);
1535
1536        if (mlx4_is_mfunc(dev))
1537                return mlx4_QP_ATTACH(dev, qp, gid, 1,
1538                                        block_mcast_loopback, prot);
1539
1540        return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
1541                                        prot, MLX4_UC_STEER);
1542}
1543EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
1544
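    /* Reverse of mlx4_unicast_attach(): same GID tagging, then detach. */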
1545int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
1546                               u8 gid[16], enum mlx4_protocol prot)
1547{
1548        if (prot == MLX4_PROT_ETH)
1549                gid[7] |= (MLX4_UC_STEER << 1);
1550
1551        if (mlx4_is_mfunc(dev))
1552                return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1553
1554        return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
1555}
1556EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
1557
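    /*
     * PF-side wrapper for the PROMISC command issued by a slave: in_param
     * holds the QPN in its low 32 bits and the port in bits 62-63,
     * in_modifier the steer type, and op_modifier selects add (non-zero) vs.
     * remove.  Unicast promiscuity is not supported in multi-function mode
     * and is silently ignored.
     */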
1558int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1559                         struct mlx4_vhcr *vhcr,
1560                         struct mlx4_cmd_mailbox *inbox,
1561                         struct mlx4_cmd_mailbox *outbox,
1562                         struct mlx4_cmd_info *cmd)
1563{
1564        u32 qpn = (u32) vhcr->in_param & 0xffffffff;
1565        int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
1566        enum mlx4_steer_type steer = vhcr->in_modifier;
1567
1568        if (port < 0)
1569                return -EINVAL;
1570
1571        /* Promiscuous unicast is not allowed in mfunc */
1572        if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
1573                return 0;
1574
1575        if (vhcr->op_modifier)
1576                return add_promisc_qp(dev, port, steer, qpn);
1577        else
1578                return remove_promisc_qp(dev, port, steer, qpn);
1579}
1580
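    /*
     * Issue the wrapped PROMISC command; the QPN and port are packed into the
     * 64-bit in_param exactly as mlx4_PROMISC_wrapper() above unpacks them.
     */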
1581static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
1582                        enum mlx4_steer_type steer, u8 add, u8 port)
1583{
1584        return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
1585                        MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
1586                        MLX4_CMD_WRAPPED);
1587}
1588
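    /*
     * The four exported helpers below add or remove a default (promiscuous)
     * QP for multicast or unicast steering.  On multi-function devices the
     * request goes through the wrapped PROMISC command; otherwise the
     * promiscuous QP lists are updated directly.
     */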
1589int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1590{
1591        if (mlx4_is_mfunc(dev))
1592                return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
1593
1594        return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
1595}
1596EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
1597
1598int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1599{
1600        if (mlx4_is_mfunc(dev))
1601                return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
1602
1603        return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
1604}
1605EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
1606
1607int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1608{
1609        if (mlx4_is_mfunc(dev))
1610                return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
1611
1612        return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
1613}
1614EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
1615
1616int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1617{
1618        if (mlx4_is_mfunc(dev))
1619                return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
1620
1621        return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
1622}
1623EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
1624
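    /*
     * Set up the MCG table state used by the A0/B0 steering modes: a bitmap
     * that hands out AMGM entries (stored after the first num_mgms hash
     * entries of the MGM table) and the mutex serializing table updates.
     * Nothing to do in device-managed mode, where the firmware owns the
     * steering tables.
     */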
1625int mlx4_init_mcg_table(struct mlx4_dev *dev)
1626{
1627        struct mlx4_priv *priv = mlx4_priv(dev);
1628        int err;
1629
1630        /* No need for the mcg_table when the FW manages the MCG table */
1631        if (dev->caps.steering_mode ==
1632            MLX4_STEERING_MODE_DEVICE_MANAGED)
1633                return 0;
1634        err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
1635                               dev->caps.num_amgms - 1, 0, 0);
1636        if (err)
1637                return err;
1638
1639        mutex_init(&priv->mcg_table.mutex);
1640
1641        return 0;
1642}
1643
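    /* Release the AMGM bitmap allocated by mlx4_init_mcg_table(). */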
1644void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
1645{
1646        if (dev->caps.steering_mode !=
1647            MLX4_STEERING_MODE_DEVICE_MANAGED)
1648                mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
1649}
1650