linux/drivers/infiniband/hw/mlx4/main.c
   1/*
   2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
   3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
   4 *
   5 * This software is available to you under a choice of one of two
   6 * licenses.  You may choose to be licensed under the terms of the GNU
   7 * General Public License (GPL) Version 2, available from the file
   8 * COPYING in the main directory of this source tree, or the
   9 * OpenIB.org BSD license below:
  10 *
  11 *     Redistribution and use in source and binary forms, with or
  12 *     without modification, are permitted provided that the following
  13 *     conditions are met:
  14 *
  15 *      - Redistributions of source code must retain the above
  16 *        copyright notice, this list of conditions and the following
  17 *        disclaimer.
  18 *
  19 *      - Redistributions in binary form must reproduce the above
  20 *        copyright notice, this list of conditions and the following
  21 *        disclaimer in the documentation and/or other materials
  22 *        provided with the distribution.
  23 *
  24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31 * SOFTWARE.
  32 */
  33
  34#include <linux/module.h>
  35#include <linux/init.h>
  36#include <linux/slab.h>
  37#include <linux/errno.h>
  38#include <linux/netdevice.h>
  39#include <linux/inetdevice.h>
  40#include <linux/rtnetlink.h>
  41#include <linux/if_vlan.h>
  42#include <linux/sched/mm.h>
  43#include <linux/sched/task.h>
  44
  45#include <net/ipv6.h>
  46#include <net/addrconf.h>
  47#include <net/devlink.h>
  48
  49#include <rdma/ib_smi.h>
  50#include <rdma/ib_user_verbs.h>
  51#include <rdma/ib_addr.h>
  52#include <rdma/ib_cache.h>
  53
  54#include <net/bonding.h>
  55
  56#include <linux/mlx4/driver.h>
  57#include <linux/mlx4/cmd.h>
  58#include <linux/mlx4/qp.h>
  59
  60#include "mlx4_ib.h"
  61#include <rdma/mlx4-abi.h>
  62
  63#define DRV_NAME        MLX4_IB_DRV_NAME
  64#define DRV_VERSION     "4.0-0"
  65
  66#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
  67#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
  68#define MLX4_IB_CARD_REV_A0   0xA0
  69
  70MODULE_AUTHOR("Roland Dreier");
  71MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
  72MODULE_LICENSE("Dual BSD/GPL");
  73
  74int mlx4_ib_sm_guid_assign = 0;
  75module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
  76MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
  77
  78static const char mlx4_ib_version[] =
  79        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
  80        DRV_VERSION "\n";
  81
  82static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
  83static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
  84                                                    u8 port_num);
  85
  86static struct workqueue_struct *wq;
  87
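/*
 * Prepare a SubnGet() SMP: LID-routed subnet management class, method GET.
 * Callers fill in attr_id/attr_mod before handing the MAD to mlx4_MAD_IFC().
 */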
  88static void init_query_mad(struct ib_smp *mad)
  89{
  90        mad->base_version  = 1;
  91        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
  92        mad->class_version = 1;
  93        mad->method        = IB_MGMT_METHOD_GET;
  94}
  95
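/*
 * Device-managed flow steering (DMFS) is usable only when the device is in
 * MLX4_STEERING_MODE_DEVICE_MANAGED and the matching capability bits are
 * set: FS_EN for Ethernet ports and DMFS_IPOIB for IB ports.  It is also
 * disabled for IB ports in a multifunction (SR-IOV) environment.
 */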
  96static int check_flow_steering_support(struct mlx4_dev *dev)
  97{
  98        int eth_num_ports = 0;
  99        int ib_num_ports = 0;
 100
 101        int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
 102
 103        if (dmfs) {
 104                int i;
 105                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 106                        eth_num_ports++;
 107                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 108                        ib_num_ports++;
 109                dmfs &= (!ib_num_ports ||
 110                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
 111                        (!eth_num_ports ||
 112                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
 113                if (ib_num_ports && mlx4_is_mfunc(dev)) {
 114                        pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
 115                        dmfs = 0;
 116                }
 117        }
 118        return dmfs;
 119}
 120
 121static int num_ib_ports(struct mlx4_dev *dev)
 122{
 123        int ib_ports = 0;
 124        int i;
 125
 126        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 127                ib_ports++;
 128
 129        return ib_ports;
 130}
 131
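/*
 * Return the net_device backing an Ethernet port, with a reference held
 * (dev_hold).  When the mlx4 device is bonded, resolve to the currently
 * active slave of the bond master rather than the raw slave device.
 */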
 132static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
 133{
 134        struct mlx4_ib_dev *ibdev = to_mdev(device);
 135        struct net_device *dev;
 136
 137        rcu_read_lock();
 138        dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
 139
 140        if (dev) {
 141                if (mlx4_is_bonded(ibdev->dev)) {
 142                        struct net_device *upper = NULL;
 143
 144                        upper = netdev_master_upper_dev_get_rcu(dev);
 145                        if (upper) {
 146                                struct net_device *active;
 147
 148                                active = bond_option_active_slave_get_rcu(netdev_priv(upper));
 149                                if (active)
 150                                        dev = active;
 151                        }
 152                }
 153        }
 154        if (dev)
 155                dev_hold(dev);
 156
 157        rcu_read_unlock();
 158        return dev;
 159}
 160
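/*
 * Push the software GID cache to the hardware GID table with SET_PORT.
 * The legacy (RoCE v1 only) layout is a flat array of GIDs; on a bonded
 * device the table is written to the second physical port as well.
 */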
 161static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
 162                                  struct mlx4_ib_dev *ibdev,
 163                                  u8 port_num)
 164{
 165        struct mlx4_cmd_mailbox *mailbox;
 166        int err;
 167        struct mlx4_dev *dev = ibdev->dev;
 168        int i;
 169        union ib_gid *gid_tbl;
 170
 171        mailbox = mlx4_alloc_cmd_mailbox(dev);
 172        if (IS_ERR(mailbox))
 173                return -ENOMEM;
 174
 175        gid_tbl = mailbox->buf;
 176
 177        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
 178                memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
 179
 180        err = mlx4_cmd(dev, mailbox->dma,
 181                       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
 182                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 183                       MLX4_CMD_WRAPPED);
 184        if (mlx4_is_bonded(dev))
 185                err += mlx4_cmd(dev, mailbox->dma,
 186                                MLX4_SET_PORT_GID_TABLE << 8 | 2,
 187                                1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 188                                MLX4_CMD_WRAPPED);
 189
 190        mlx4_free_cmd_mailbox(dev, mailbox);
 191        return err;
 192}
 193
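/*
 * RoCE v1/v2 capable firmware takes an extended GID table entry: 'version'
 * is set to 2 for RoCE v2 GIDs, and 'type' to 1 when the GID is a real
 * IPv6 address rather than an IPv4-mapped one.
 */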
 194static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
 195                                     struct mlx4_ib_dev *ibdev,
 196                                     u8 port_num)
 197{
 198        struct mlx4_cmd_mailbox *mailbox;
 199        int err;
 200        struct mlx4_dev *dev = ibdev->dev;
 201        int i;
 202        struct {
 203                union ib_gid    gid;
 204                __be32          rsrvd1[2];
 205                __be16          rsrvd2;
 206                u8              type;
 207                u8              version;
 208                __be32          rsrvd3;
 209        } *gid_tbl;
 210
 211        mailbox = mlx4_alloc_cmd_mailbox(dev);
 212        if (IS_ERR(mailbox))
 213                return -ENOMEM;
 214
 215        gid_tbl = mailbox->buf;
 216        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
 217                memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
 218                if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
 219                        gid_tbl[i].version = 2;
 220                        if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
 221                                gid_tbl[i].type = 1;
 222                }
 223        }
 224
 225        err = mlx4_cmd(dev, mailbox->dma,
 226                       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
 227                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 228                       MLX4_CMD_WRAPPED);
 229        if (mlx4_is_bonded(dev))
 230                err += mlx4_cmd(dev, mailbox->dma,
 231                                MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
 232                                1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 233                                MLX4_CMD_WRAPPED);
 234
 235        mlx4_free_cmd_mailbox(dev, mailbox);
 236        return err;
 237}
 238
 239static int mlx4_ib_update_gids(struct gid_entry *gids,
 240                               struct mlx4_ib_dev *ibdev,
 241                               u8 port_num)
 242{
 243        if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
 244                return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
 245
 246        return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
 247}
 248
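/* Clear a cached GID slot and free its context; called with iboe->lock held. */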
 249static void free_gid_entry(struct gid_entry *entry)
 250{
 251        memset(&entry->gid, 0, sizeof(entry->gid));
 252        kfree(entry->ctx);
 253        entry->ctx = NULL;
 254}
 255
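/*
 * Add a GID to the per-port software cache.  A duplicate add just takes
 * another reference on the existing entry; a new entry consumes a free
 * slot and triggers a rewrite of the whole hardware GID table.
 */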
 256static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
 257{
 258        struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
 259        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 260        struct mlx4_port_gid_table   *port_gid_table;
 261        int free = -1, found = -1;
 262        int ret = 0;
 263        int hw_update = 0;
 264        int i;
 265        struct gid_entry *gids = NULL;
 266        u16 vlan_id = 0xffff;
 267        u8 mac[ETH_ALEN];
 268
 269        if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
 270                return -EINVAL;
 271
 272        if (attr->port_num > MLX4_MAX_PORTS)
 273                return -EINVAL;
 274
 275        if (!context)
 276                return -EINVAL;
 277
 278        ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
 279        if (ret)
 280                return ret;
 281        port_gid_table = &iboe->gids[attr->port_num - 1];
 282        spin_lock_bh(&iboe->lock);
 283        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
 284                if (!memcmp(&port_gid_table->gids[i].gid,
 285                            &attr->gid, sizeof(attr->gid)) &&
 286                    port_gid_table->gids[i].gid_type == attr->gid_type &&
 287                    port_gid_table->gids[i].vlan_id == vlan_id)  {
 288                        found = i;
 289                        break;
 290                }
 291                if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
 292                        free = i; /* HW has space */
 293        }
 294
 295        if (found < 0) {
 296                if (free < 0) {
 297                        ret = -ENOSPC;
 298                } else {
 299                        port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
 300                        if (!port_gid_table->gids[free].ctx) {
 301                                ret = -ENOMEM;
 302                        } else {
 303                                *context = port_gid_table->gids[free].ctx;
 304                                memcpy(&port_gid_table->gids[free].gid,
 305                                       &attr->gid, sizeof(attr->gid));
 306                                port_gid_table->gids[free].gid_type = attr->gid_type;
 307                                port_gid_table->gids[free].vlan_id = vlan_id;
 308                                port_gid_table->gids[free].ctx->real_index = free;
 309                                port_gid_table->gids[free].ctx->refcount = 1;
 310                                hw_update = 1;
 311                        }
 312                }
 313        } else {
 314                struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
 315                *context = ctx;
 316                ctx->refcount++;
 317        }
 318        if (!ret && hw_update) {
 319                gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
 320                                     GFP_ATOMIC);
 321                if (!gids) {
 322                        ret = -ENOMEM;
 323                        *context = NULL;
 324                        free_gid_entry(&port_gid_table->gids[free]);
 325                } else {
 326                        for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
 327                                memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
 328                                gids[i].gid_type = port_gid_table->gids[i].gid_type;
 329                        }
 330                }
 331        }
 332        spin_unlock_bh(&iboe->lock);
 333
 334        if (!ret && hw_update) {
 335                ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
 336                if (ret) {
 337                        spin_lock_bh(&iboe->lock);
 338                        *context = NULL;
 339                        free_gid_entry(&port_gid_table->gids[free]);
 340                        spin_unlock_bh(&iboe->lock);
 341                }
 342                kfree(gids);
 343        }
 344
 345        return ret;
 346}
 347
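/*
 * Drop a reference on a cached GID.  When the last reference goes away,
 * the slot is cleared and the hardware GID table is rewritten.
 */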
 348static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
 349{
 350        struct gid_cache_context *ctx = *context;
 351        struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
 352        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 353        struct mlx4_port_gid_table   *port_gid_table;
 354        int ret = 0;
 355        int hw_update = 0;
 356        struct gid_entry *gids = NULL;
 357
 358        if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
 359                return -EINVAL;
 360
 361        if (attr->port_num > MLX4_MAX_PORTS)
 362                return -EINVAL;
 363
 364        port_gid_table = &iboe->gids[attr->port_num - 1];
 365        spin_lock_bh(&iboe->lock);
 366        if (ctx) {
 367                ctx->refcount--;
 368                if (!ctx->refcount) {
 369                        unsigned int real_index = ctx->real_index;
 370
 371                        free_gid_entry(&port_gid_table->gids[real_index]);
 372                        hw_update = 1;
 373                }
 374        }
 375        if (!ret && hw_update) {
 376                int i;
 377
 378                gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
 379                                     GFP_ATOMIC);
 380                if (!gids) {
 381                        ret = -ENOMEM;
 382                } else {
 383                        for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
 384                                memcpy(&gids[i].gid,
 385                                       &port_gid_table->gids[i].gid,
 386                                       sizeof(union ib_gid));
 387                                gids[i].gid_type =
 388                                    port_gid_table->gids[i].gid_type;
 389                        }
 390                }
 391        }
 392        spin_unlock_bh(&iboe->lock);
 393
 394        if (!ret && hw_update) {
 395                ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
 396                kfree(gids);
 397        }
 398        return ret;
 399}
 400
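/*
 * Translate a GID cache index into the index actually programmed in the
 * hardware GID table.  For IB ports (no RoCE GID table) the attribute
 * index is used as-is; bonded devices always look up port 1.
 */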
 401int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
 402                                    const struct ib_gid_attr *attr)
 403{
 404        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 405        struct gid_cache_context *ctx = NULL;
 406        struct mlx4_port_gid_table   *port_gid_table;
 407        int real_index = -EINVAL;
 408        int i;
 409        unsigned long flags;
 410        u8 port_num = attr->port_num;
 411
 412        if (port_num > MLX4_MAX_PORTS)
 413                return -EINVAL;
 414
 415        if (mlx4_is_bonded(ibdev->dev))
 416                port_num = 1;
 417
 418        if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
 419                return attr->index;
 420
 421        spin_lock_irqsave(&iboe->lock, flags);
 422        port_gid_table = &iboe->gids[port_num - 1];
 423
 424        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
 425                if (!memcmp(&port_gid_table->gids[i].gid,
 426                            &attr->gid, sizeof(attr->gid)) &&
 427                    attr->gid_type == port_gid_table->gids[i].gid_type) {
 428                        ctx = port_gid_table->gids[i].ctx;
 429                        break;
 430                }
 431        if (ctx)
 432                real_index = ctx->real_index;
 433        spin_unlock_irqrestore(&iboe->lock, flags);
 434        return real_index;
 435}
 436
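/*
 * Fill in device attributes from a NODE_INFO MAD plus cached firmware
 * capabilities.  The extended uverbs response (core clock offset, max
 * inline receive size, RSS and TSO caps) is reported only as far as the
 * caller's output buffer allows, tracked via resp.response_length.
 */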
 437static int mlx4_ib_query_device(struct ib_device *ibdev,
 438                                struct ib_device_attr *props,
 439                                struct ib_udata *uhw)
 440{
 441        struct mlx4_ib_dev *dev = to_mdev(ibdev);
 442        struct ib_smp *in_mad  = NULL;
 443        struct ib_smp *out_mad = NULL;
 444        int err;
 445        int have_ib_ports;
 446        struct mlx4_uverbs_ex_query_device cmd;
 447        struct mlx4_uverbs_ex_query_device_resp resp = {};
 448        struct mlx4_clock_params clock_params;
 449
 450        if (uhw->inlen) {
 451                if (uhw->inlen < sizeof(cmd))
 452                        return -EINVAL;
 453
 454                err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
 455                if (err)
 456                        return err;
 457
 458                if (cmd.comp_mask)
 459                        return -EINVAL;
 460
 461                if (cmd.reserved)
 462                        return -EINVAL;
 463        }
 464
 465        resp.response_length = offsetof(typeof(resp), response_length) +
 466                sizeof(resp.response_length);
 467        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 468        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 469        err = -ENOMEM;
 470        if (!in_mad || !out_mad)
 471                goto out;
 472
 473        init_query_mad(in_mad);
 474        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 475
 476        err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
 477                           1, NULL, NULL, in_mad, out_mad);
 478        if (err)
 479                goto out;
 480
 481        memset(props, 0, sizeof *props);
 482
 483        have_ib_ports = num_ib_ports(dev->dev);
 484
 485        props->fw_ver = dev->dev->caps.fw_ver;
 486        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 487                IB_DEVICE_PORT_ACTIVE_EVENT             |
 488                IB_DEVICE_SYS_IMAGE_GUID                |
 489                IB_DEVICE_RC_RNR_NAK_GEN                |
 490                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 491        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 492                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 493        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 494                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
 495        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 496                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 497        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 498                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 499        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 500                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
 501        if (dev->dev->caps.max_gso_sz &&
 502            (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
 503            (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 504                props->device_cap_flags |= IB_DEVICE_UD_TSO;
 505        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 506                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
 507        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
 508            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
 509            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
 510                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 511        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
 512                props->device_cap_flags |= IB_DEVICE_XRC;
 513        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
 514                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
 515        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
 516                if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
 517                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
 518                else
 519                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
 520        }
 521        if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
 522                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 523
 524        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 525
 526        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 527                0xffffff;
 528        props->vendor_part_id      = dev->dev->persist->pdev->device;
 529        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
 530        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
 531
 532        props->max_mr_size         = ~0ull;
 533        props->page_size_cap       = dev->dev->caps.page_size_cap;
 534        props->max_qp              = dev->dev->quotas.qp;
 535        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
 536        props->max_send_sge =
 537                min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
 538        props->max_recv_sge =
 539                min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
 540        props->max_sge_rd = MLX4_MAX_SGE_RD;
 541        props->max_cq              = dev->dev->quotas.cq;
 542        props->max_cqe             = dev->dev->caps.max_cqes;
 543        props->max_mr              = dev->dev->quotas.mpt;
 544        props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
 545        props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
 546        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
 547        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
 548        props->max_srq             = dev->dev->quotas.srq;
 549        props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
 550        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
 551        props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
 552        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
 553        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
 554                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
 555        props->masked_atomic_cap   = props->atomic_cap;
 556        props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
 557        props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
 558        props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
 559        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 560                                           props->max_mcast_grp;
 561        props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 562        props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 563        props->max_ah = INT_MAX;
 564
 565        if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
 566            mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
 567                if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
 568                        props->rss_caps.max_rwq_indirection_tables =
 569                                props->max_qp;
 570                        props->rss_caps.max_rwq_indirection_table_size =
 571                                dev->dev->caps.max_rss_tbl_sz;
 572                        props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
 573                        props->max_wq_type_rq = props->max_qp;
 574                }
 575
 576                if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
 577                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
 578        }
 579
 580        props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
 581        props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
 582
 583        if (!mlx4_is_slave(dev->dev))
 584                err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
 585
 586        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
 587                resp.response_length += sizeof(resp.hca_core_clock_offset);
 588                if (!err && !mlx4_is_slave(dev->dev)) {
 589                        resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
 590                        resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
 591                }
 592        }
 593
 594        if (uhw->outlen >= resp.response_length +
 595            sizeof(resp.max_inl_recv_sz)) {
 596                resp.response_length += sizeof(resp.max_inl_recv_sz);
 597                resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
 598                        sizeof(struct mlx4_wqe_data_seg);
 599        }
 600
 601        if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
 602                if (props->rss_caps.supported_qpts) {
 603                        resp.rss_caps.rx_hash_function =
 604                                MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
 605
 606                        resp.rss_caps.rx_hash_fields_mask =
 607                                MLX4_IB_RX_HASH_SRC_IPV4 |
 608                                MLX4_IB_RX_HASH_DST_IPV4 |
 609                                MLX4_IB_RX_HASH_SRC_IPV6 |
 610                                MLX4_IB_RX_HASH_DST_IPV6 |
 611                                MLX4_IB_RX_HASH_SRC_PORT_TCP |
 612                                MLX4_IB_RX_HASH_DST_PORT_TCP |
 613                                MLX4_IB_RX_HASH_SRC_PORT_UDP |
 614                                MLX4_IB_RX_HASH_DST_PORT_UDP;
 615
 616                        if (dev->dev->caps.tunnel_offload_mode ==
 617                            MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 618                                resp.rss_caps.rx_hash_fields_mask |=
 619                                        MLX4_IB_RX_HASH_INNER;
 620                }
 621                resp.response_length = offsetof(typeof(resp), rss_caps) +
 622                                       sizeof(resp.rss_caps);
 623        }
 624
 625        if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
 626                if (dev->dev->caps.max_gso_sz &&
 627                    ((mlx4_ib_port_link_layer(ibdev, 1) ==
 628                    IB_LINK_LAYER_ETHERNET) ||
 629                    (mlx4_ib_port_link_layer(ibdev, 2) ==
 630                    IB_LINK_LAYER_ETHERNET))) {
 631                        resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
 632                        resp.tso_caps.supported_qpts |=
 633                                1 << IB_QPT_RAW_PACKET;
 634                }
 635                resp.response_length = offsetof(typeof(resp), tso_caps) +
 636                                       sizeof(resp.tso_caps);
 637        }
 638
 639        if (uhw->outlen) {
 640                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 641                if (err)
 642                        goto out;
 643        }
 644out:
 645        kfree(in_mad);
 646        kfree(out_mad);
 647
 648        return err;
 649}
 650
 651static enum rdma_link_layer
 652mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 653{
 654        struct mlx4_dev *dev = to_mdev(device)->dev;
 655
 656        return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
 657                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 658}
 659
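/*
 * Query an IB port via PORT_INFO (and EXTENDED_PORT_INFO when the reported
 * speed is QDR, to detect FDR-10).  With netw_view set on a multifunction
 * device, the MAD is issued with the network (SM) view.
 */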
 660static int ib_link_query_port(struct ib_device *ibdev, u8 port,
 661                              struct ib_port_attr *props, int netw_view)
 662{
 663        struct ib_smp *in_mad  = NULL;
 664        struct ib_smp *out_mad = NULL;
 665        int ext_active_speed;
 666        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
 667        int err = -ENOMEM;
 668
 669        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 670        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 671        if (!in_mad || !out_mad)
 672                goto out;
 673
 674        init_query_mad(in_mad);
 675        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 676        in_mad->attr_mod = cpu_to_be32(port);
 677
 678        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
 679                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
 680
 681        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
 682                                in_mad, out_mad);
 683        if (err)
 684                goto out;
 685
 686
 687        props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
 688        props->lmc              = out_mad->data[34] & 0x7;
 689        props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
 690        props->sm_sl            = out_mad->data[36] & 0xf;
 691        props->state            = out_mad->data[32] & 0xf;
 692        props->phys_state       = out_mad->data[33] >> 4;
 693        props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
 694        if (netw_view)
 695                props->gid_tbl_len = out_mad->data[50];
 696        else
 697                props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
 698        props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
 699        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
 700        props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
 701        props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
 702        props->active_width     = out_mad->data[31] & 0xf;
 703        props->active_speed     = out_mad->data[35] >> 4;
 704        props->max_mtu          = out_mad->data[41] & 0xf;
 705        props->active_mtu       = out_mad->data[36] >> 4;
 706        props->subnet_timeout   = out_mad->data[51] & 0x1f;
 707        props->max_vl_num       = out_mad->data[37] >> 4;
 708        props->init_type_reply  = out_mad->data[41] >> 4;
 709
 710        /* Check if extended speeds (EDR/FDR/...) are supported */
 711        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
 712                ext_active_speed = out_mad->data[62] >> 4;
 713
 714                switch (ext_active_speed) {
 715                case 1:
 716                        props->active_speed = IB_SPEED_FDR;
 717                        break;
 718                case 2:
 719                        props->active_speed = IB_SPEED_EDR;
 720                        break;
 721                }
 722        }
 723
 724        /* If reported active speed is QDR, check if is FDR-10 */
 725        if (props->active_speed == IB_SPEED_QDR) {
 726                init_query_mad(in_mad);
 727                in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
 728                in_mad->attr_mod = cpu_to_be32(port);
 729
 730                err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
 731                                   NULL, NULL, in_mad, out_mad);
 732                if (err)
 733                        goto out;
 734
 735                /* Checking LinkSpeedActive for FDR-10 */
 736                if (out_mad->data[15] & 0x1)
 737                        props->active_speed = IB_SPEED_FDR10;
 738        }
 739
 740        /* Avoid wrong speed value returned by FW if the IB link is down. */
 741        if (props->state == IB_PORT_DOWN)
 742                 props->active_speed = IB_SPEED_SDR;
 743
 744out:
 745        kfree(in_mad);
 746        kfree(out_mad);
 747        return err;
 748}
 749
 750static u8 state_to_phys_state(enum ib_port_state state)
 751{
 752        return state == IB_PORT_ACTIVE ?
 753                IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 754}
 755
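/*
 * Ethernet (RoCE) ports report IB-style attributes derived from a
 * QUERY_PORT mailbox plus the state of the associated net_device (or its
 * bond master when the device is bonded): active_mtu and state follow the
 * netdev, and the port is reported DOWN if no netdev is attached.
 */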
 756static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 757                               struct ib_port_attr *props)
 758{
 759
 760        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
 761        struct mlx4_ib_iboe *iboe = &mdev->iboe;
 762        struct net_device *ndev;
 763        enum ib_mtu tmp;
 764        struct mlx4_cmd_mailbox *mailbox;
 765        int err = 0;
 766        int is_bonded = mlx4_is_bonded(mdev->dev);
 767
 768        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 769        if (IS_ERR(mailbox))
 770                return PTR_ERR(mailbox);
 771
 772        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
 773                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
 774                           MLX4_CMD_WRAPPED);
 775        if (err)
 776                goto out;
 777
 778        props->active_width     =  (((u8 *)mailbox->buf)[5] == 0x40) ||
 779                                   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
 780                                           IB_WIDTH_4X : IB_WIDTH_1X;
 781        props->active_speed     =  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
 782                                           IB_SPEED_FDR : IB_SPEED_QDR;
 783        props->port_cap_flags   = IB_PORT_CM_SUP;
 784        props->ip_gids = true;
 785        props->gid_tbl_len      = mdev->dev->caps.gid_table_len[port];
 786        props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
 787        if (mdev->dev->caps.pkey_table_len[port])
 788                props->pkey_tbl_len = 1;
 789        props->max_mtu          = IB_MTU_4096;
 790        props->max_vl_num       = 2;
 791        props->state            = IB_PORT_DOWN;
 792        props->phys_state       = state_to_phys_state(props->state);
 793        props->active_mtu       = IB_MTU_256;
 794        spin_lock_bh(&iboe->lock);
 795        ndev = iboe->netdevs[port - 1];
 796        if (ndev && is_bonded) {
 797                rcu_read_lock(); /* required to get upper dev */
 798                ndev = netdev_master_upper_dev_get_rcu(ndev);
 799                rcu_read_unlock();
 800        }
 801        if (!ndev)
 802                goto out_unlock;
 803
 804        tmp = iboe_get_mtu(ndev->mtu);
 805        props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
 806
 807        props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
 808                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
 809        props->phys_state       = state_to_phys_state(props->state);
 810out_unlock:
 811        spin_unlock_bh(&iboe->lock);
 812out:
 813        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 814        return err;
 815}
 816
 817int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 818                         struct ib_port_attr *props, int netw_view)
 819{
 820        int err;
 821
 822        /* props being zeroed by the caller, avoid zeroing it here */
 823
 824        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
 825                ib_link_query_port(ibdev, port, props, netw_view) :
 826                                eth_link_query_port(ibdev, port, props);
 827
 828        return err;
 829}
 830
 831static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 832                              struct ib_port_attr *props)
 833{
 834        /* returns host view */
 835        return __mlx4_ib_query_port(ibdev, port, props, 0);
 836}
 837
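/*
 * Build a GID from the PORT_INFO GID prefix and the GUID_INFO table.  On a
 * multifunction device queried with the host view, only index 0 is valid;
 * higher indices return the null GUID.
 */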
 838int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 839                        union ib_gid *gid, int netw_view)
 840{
 841        struct ib_smp *in_mad  = NULL;
 842        struct ib_smp *out_mad = NULL;
 843        int err = -ENOMEM;
 844        struct mlx4_ib_dev *dev = to_mdev(ibdev);
 845        int clear = 0;
 846        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
 847
 848        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 849        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 850        if (!in_mad || !out_mad)
 851                goto out;
 852
 853        init_query_mad(in_mad);
 854        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 855        in_mad->attr_mod = cpu_to_be32(port);
 856
 857        if (mlx4_is_mfunc(dev->dev) && netw_view)
 858                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
 859
 860        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
 861        if (err)
 862                goto out;
 863
 864        memcpy(gid->raw, out_mad->data + 8, 8);
 865
 866        if (mlx4_is_mfunc(dev->dev) && !netw_view) {
 867                if (index) {
 868                        /* For any index > 0, return the null guid */
 869                        err = 0;
 870                        clear = 1;
 871                        goto out;
 872                }
 873        }
 874
 875        init_query_mad(in_mad);
 876        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
 877        in_mad->attr_mod = cpu_to_be32(index / 8);
 878
 879        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
 880                           NULL, NULL, in_mad, out_mad);
 881        if (err)
 882                goto out;
 883
 884        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
 885
 886out:
 887        if (clear)
 888                memset(gid->raw + 8, 0, 8);
 889        kfree(in_mad);
 890        kfree(out_mad);
 891        return err;
 892}
 893
 894static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 895                             union ib_gid *gid)
 896{
 897        if (rdma_protocol_ib(ibdev, port))
 898                return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
 899        return 0;
 900}
 901
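/*
 * Read the SL-to-VL mapping table with an SL_TO_VL SMP and pack the eight
 * entries into a single u64.  Slaves cannot issue the query and simply
 * report zero.
 */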
 902static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
 903{
 904        union sl2vl_tbl_to_u64 sl2vl64;
 905        struct ib_smp *in_mad  = NULL;
 906        struct ib_smp *out_mad = NULL;
 907        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
 908        int err = -ENOMEM;
 909        int jj;
 910
 911        if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
 912                *sl2vl_tbl = 0;
 913                return 0;
 914        }
 915
 916        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 917        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
 918        if (!in_mad || !out_mad)
 919                goto out;
 920
 921        init_query_mad(in_mad);
 922        in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
 923        in_mad->attr_mod = 0;
 924
 925        if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
 926                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
 927
 928        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
 929                           in_mad, out_mad);
 930        if (err)
 931                goto out;
 932
 933        for (jj = 0; jj < 8; jj++)
 934                sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
 935        *sl2vl_tbl = sl2vl64.sl64;
 936
 937out:
 938        kfree(in_mad);
 939        kfree(out_mad);
 940        return err;
 941}
 942
 943static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
 944{
 945        u64 sl2vl;
 946        int i;
 947        int err;
 948
 949        for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
 950                if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
 951                        continue;
 952                err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
 953                if (err) {
 954                        pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
 955                               i, err);
 956                        sl2vl = 0;
 957                }
 958                atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
 959        }
 960}
 961
 962int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 963                         u16 *pkey, int netw_view)
 964{
 965        struct ib_smp *in_mad  = NULL;
 966        struct ib_smp *out_mad = NULL;
 967        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
 968        int err = -ENOMEM;
 969
 970        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 971        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 972        if (!in_mad || !out_mad)
 973                goto out;
 974
 975        init_query_mad(in_mad);
 976        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
 977        in_mad->attr_mod = cpu_to_be32(index / 32);
 978
 979        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
 980                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
 981
 982        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
 983                           in_mad, out_mad);
 984        if (err)
 985                goto out;
 986
 987        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
 988
 989out:
 990        kfree(in_mad);
 991        kfree(out_mad);
 992        return err;
 993}
 994
 995static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 996{
 997        return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
 998}
 999
1000static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
1001                                 struct ib_device_modify *props)
1002{
1003        struct mlx4_cmd_mailbox *mailbox;
1004        unsigned long flags;
1005
1006        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1007                return -EOPNOTSUPP;
1008
1009        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1010                return 0;
1011
1012        if (mlx4_is_slave(to_mdev(ibdev)->dev))
1013                return -EOPNOTSUPP;
1014
1015        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
1016        memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1017        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
1018
1019        /*
1020         * If possible, pass node desc to FW, so it can generate
1021         * a 144 trap.  If cmd fails, just ignore.
1022         */
1023        mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
1024        if (IS_ERR(mailbox))
1025                return 0;
1026
1027        memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1028        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
1029                 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1030
1031        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
1032
1033        return 0;
1034}
1035
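/*
 * Update IB port capabilities via SET_PORT.  The mailbox layout differs
 * between old-style port commands (MLX4_FLAG_OLD_PORT_CMDS) and the
 * current format.
 */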
1036static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
1037                            u32 cap_mask)
1038{
1039        struct mlx4_cmd_mailbox *mailbox;
1040        int err;
1041
1042        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
1043        if (IS_ERR(mailbox))
1044                return PTR_ERR(mailbox);
1045
1046        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1047                *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
1048                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
1049        } else {
1050                ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
1051                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
1052        }
1053
1054        err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
1055                       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1056                       MLX4_CMD_WRAPPED);
1057
1058        mlx4_free_cmd_mailbox(dev->dev, mailbox);
1059        return err;
1060}
1061
1062static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1063                               struct ib_port_modify *props)
1064{
1065        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1066        u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
1067        struct ib_port_attr attr;
1068        u32 cap_mask;
1069        int err;
1070
1071        /* return OK if this is RoCE. CM calls ib_modify_port() regardless
1072         * of whether port link layer is ETH or IB. For ETH ports, qkey
1073         * violations and port capabilities are not meaningful.
1074         */
1075        if (is_eth)
1076                return 0;
1077
1078        mutex_lock(&mdev->cap_mask_mutex);
1079
1080        err = ib_query_port(ibdev, port, &attr);
1081        if (err)
1082                goto out;
1083
1084        cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
1085                ~props->clr_port_cap_mask;
1086
1087        err = mlx4_ib_SET_PORT(mdev, port,
1088                               !!(mask & IB_PORT_RESET_QKEY_CNTR),
1089                               cap_mask);
1090
1091out:
1092        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1093        return err;
1094}
1095
1096static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
1097                                  struct ib_udata *udata)
1098{
1099        struct ib_device *ibdev = uctx->device;
1100        struct mlx4_ib_dev *dev = to_mdev(ibdev);
1101        struct mlx4_ib_ucontext *context = to_mucontext(uctx);
1102        struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
1103        struct mlx4_ib_alloc_ucontext_resp resp;
1104        int err;
1105
1106        if (!dev->ib_active)
1107                return -EAGAIN;
1108
1109        if (ibdev->ops.uverbs_abi_ver ==
1110            MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1111                resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
1112                resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
1113                resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1114        } else {
1115                resp.dev_caps         = dev->dev->caps.userspace_caps;
1116                resp.qp_tab_size      = dev->dev->caps.num_qps;
1117                resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
1118                resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1119                resp.cqe_size         = dev->dev->caps.cqe_size;
1120        }
1121
1122        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1123        if (err)
1124                return err;
1125
1126        INIT_LIST_HEAD(&context->db_page_list);
1127        mutex_init(&context->db_page_mutex);
1128
1129        INIT_LIST_HEAD(&context->wqn_ranges_list);
1130        mutex_init(&context->wqn_ranges_mutex);
1131
1132        if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1133                err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
1134        else
1135                err = ib_copy_to_udata(udata, &resp, sizeof(resp));
1136
1137        if (err) {
1138                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1139                return -EFAULT;
1140        }
1141
1142        return err;
1143}
1144
1145static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1146{
1147        struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1148
1149        mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
1150}
1151
1152static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1153{
1154}
1155
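/*
 * Userspace mmap offsets: page 0 maps the context's UAR, page 1 the
 * BlueFlame register page (if the device has one), and page 3 the HCA
 * internal clock page.
 */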
1156static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1157{
1158        struct mlx4_ib_dev *dev = to_mdev(context->device);
1159
1160        switch (vma->vm_pgoff) {
1161        case 0:
1162                return rdma_user_mmap_io(context, vma,
1163                                         to_mucontext(context)->uar.pfn,
1164                                         PAGE_SIZE,
1165                                         pgprot_noncached(vma->vm_page_prot),
1166                                         NULL);
1167
1168        case 1:
1169                if (dev->dev->caps.bf_reg_size == 0)
1170                        return -EINVAL;
1171                return rdma_user_mmap_io(
1172                        context, vma,
1173                        to_mucontext(context)->uar.pfn +
1174                                dev->dev->caps.num_uars,
1175                        PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
1176                        NULL);
1177
1178        case 3: {
1179                struct mlx4_clock_params params;
1180                int ret;
1181
1182                ret = mlx4_get_internal_clock_params(dev->dev, &params);
1183                if (ret)
1184                        return ret;
1185
1186                return rdma_user_mmap_io(
1187                        context, vma,
1188                        (pci_resource_start(dev->dev->persist->pdev,
1189                                            params.bar) +
1190                         params.offset) >>
1191                                PAGE_SHIFT,
1192                        PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
1193                        NULL);
1194        }
1195
1196        default:
1197                return -EINVAL;
1198        }
1199}
1200
1201static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
1202{
1203        struct mlx4_ib_pd *pd = to_mpd(ibpd);
1204        struct ib_device *ibdev = ibpd->device;
1205        int err;
1206
1207        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1208        if (err)
1209                return err;
1210
1211        if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
1212                mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1213                return -EFAULT;
1214        }
1215        return 0;
1216}
1217
1218static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
1219{
1220        mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1221}
1222
1223static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
1224{
1225        struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
1226        struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
1227        struct ib_cq_init_attr cq_attr = {};
1228        int err;
1229
1230        if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1231                return -EOPNOTSUPP;
1232
1233        err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
1234        if (err)
1235                return err;
1236
1237        xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
1238        if (IS_ERR(xrcd->pd)) {
1239                err = PTR_ERR(xrcd->pd);
1240                goto err2;
1241        }
1242
1243        cq_attr.cqe = 1;
1244        xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
1245        if (IS_ERR(xrcd->cq)) {
1246                err = PTR_ERR(xrcd->cq);
1247                goto err3;
1248        }
1249
1250        return 0;
1251
1252err3:
1253        ib_dealloc_pd(xrcd->pd);
1254err2:
1255        mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
1256        return err;
1257}
1258
1259static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
1260{
1261        ib_destroy_cq(to_mxrcd(xrcd)->cq);
1262        ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1263        mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1264}
1265
1266static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1267{
1268        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1269        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1270        struct mlx4_ib_gid_entry *ge;
1271
1272        ge = kzalloc(sizeof *ge, GFP_KERNEL);
1273        if (!ge)
1274                return -ENOMEM;
1275
1276        ge->gid = *gid;
1277        if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1278                ge->port = mqp->port;
1279                ge->added = 1;
1280        }
1281
1282        mutex_lock(&mqp->mutex);
1283        list_add_tail(&ge->list, &mqp->gid_list);
1284        mutex_unlock(&mqp->mutex);
1285
1286        return 0;
1287}
1288
1289static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1290                                          struct mlx4_ib_counters *ctr_table)
1291{
1292        struct counter_index *counter, *tmp_count;
1293
1294        mutex_lock(&ctr_table->mutex);
1295        list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1296                                 list) {
1297                if (counter->allocated)
1298                        mlx4_counter_free(ibdev->dev, counter->index);
1299                list_del(&counter->list);
1300                kfree(counter);
1301        }
1302        mutex_unlock(&ctr_table->mutex);
1303}
1304
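/*
 * Returns 1 if the QP's port currently has an attached net_device (the
 * caller then marks the gid entry as 'added'), 0 otherwise.
 */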
1305int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1306                   union ib_gid *gid)
1307{
1308        struct net_device *ndev;
1309        int ret = 0;
1310
1311        if (!mqp->port)
1312                return 0;
1313
1314        spin_lock_bh(&mdev->iboe.lock);
1315        ndev = mdev->iboe.netdevs[mqp->port - 1];
1316        if (ndev)
1317                dev_hold(ndev);
1318        spin_unlock_bh(&mdev->iboe.lock);
1319
1320        if (ndev) {
1321                ret = 1;
1322                dev_put(ndev);
1323        }
1324
1325        return ret;
1326}
1327
1328struct mlx4_ib_steering {
1329        struct list_head list;
1330        struct mlx4_flow_reg_id reg_id;
1331        union ib_gid gid;
1332};
1333
1334#define LAST_ETH_FIELD vlan_tag
1335#define LAST_IB_FIELD sl
1336#define LAST_IPV4_FIELD dst_ip
1337#define LAST_TCP_UDP_FIELD src_port
1338
 1339/* 'field' names the last supported field; the macro returns nonzero if any mask bits are set past it */
1340#define FIELDS_NOT_SUPPORTED(filter, field)\
1341        memchr_inv((void *)&filter.field  +\
1342                   sizeof(filter.field), 0,\
1343                   sizeof(filter) -\
1344                   offsetof(typeof(filter), field) -\
1345                   sizeof(filter.field))
1346
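/*
 * Translate one ib_flow_spec into the mlx4 hardware steering rule format.
 * Returns the hardware rule segment size on success (also written, in
 * dwords, into mlx4_spec->size), or a negative errno.
 */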
1347static int parse_flow_attr(struct mlx4_dev *dev,
1348                           u32 qp_num,
1349                           union ib_flow_spec *ib_spec,
1350                           struct _rule_hw *mlx4_spec)
1351{
1352        enum mlx4_net_trans_rule_id type;
1353
1354        switch (ib_spec->type) {
1355        case IB_FLOW_SPEC_ETH:
1356                if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1357                        return -ENOTSUPP;
1358
1359                type = MLX4_NET_TRANS_RULE_ID_ETH;
1360                memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1361                       ETH_ALEN);
1362                memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1363                       ETH_ALEN);
1364                mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1365                mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1366                break;
1367        case IB_FLOW_SPEC_IB:
1368                if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1369                        return -ENOTSUPP;
1370
1371                type = MLX4_NET_TRANS_RULE_ID_IB;
1372                mlx4_spec->ib.l3_qpn =
1373                        cpu_to_be32(qp_num);
1374                mlx4_spec->ib.qpn_mask =
1375                        cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1376                break;
1377
1378
1379        case IB_FLOW_SPEC_IPV4:
1380                if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1381                        return -ENOTSUPP;
1382
1383                type = MLX4_NET_TRANS_RULE_ID_IPV4;
1384                mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1385                mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1386                mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1387                mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1388                break;
1389
1390        case IB_FLOW_SPEC_TCP:
1391        case IB_FLOW_SPEC_UDP:
1392                if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1393                        return -ENOTSUPP;
1394
1395                type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1396                                        MLX4_NET_TRANS_RULE_ID_TCP :
1397                                        MLX4_NET_TRANS_RULE_ID_UDP;
1398                mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1399                mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1400                mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1401                mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1402                break;
1403
1404        default:
1405                return -EINVAL;
1406        }
1407        if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1408            mlx4_hw_rule_sz(dev, type) < 0)
1409                return -EINVAL;
1410        mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1411        mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1412        return mlx4_hw_rule_sz(dev, type);
1413}
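    /*
     * Note: parse_flow_attr() returns the size in bytes of the HW spec it
     * wrote (mlx4_spec->size itself is stored in 4-byte units), so callers
     * can advance their mailbox pointer and accumulate the total rule size.
     */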
1414
1415struct default_rules {
1416        __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1417        __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1418        __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1419        __u8  link_layer;
1420};
1421static const struct default_rules default_table[] = {
1422        {
1423                .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1424                .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1425                .rules_create_list = {IB_FLOW_SPEC_IB},
1426                .link_layer = IB_LINK_LAYER_INFINIBAND
1427        }
1428};
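    /*
     * Each entry above lists, per layer, the spec types a flow is expected
     * to use (mandatory_fields), the spec types it must not contain
     * (mandatory_not_fields), and the specs that are then created for it
     * implicitly (rules_create_list).  The single entry applies to
     * InfiniBand link-layer ports and implicitly adds an IB spec.
     */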
1429
1430static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1431                                         struct ib_flow_attr *flow_attr)
1432{
1433        int i, j, k;
1434        void *ib_flow;
1435        const struct default_rules *pdefault_rules = default_table;
1436        u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1437
1438        for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1439                __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1440                memset(&field_types, 0, sizeof(field_types));
1441
1442                if (link_layer != pdefault_rules->link_layer)
1443                        continue;
1444
1445                ib_flow = flow_attr + 1;
1446                /* we assume the specs are sorted */
1447                for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1448                     j < flow_attr->num_of_specs; k++) {
1449                        union ib_flow_spec *current_flow =
1450                                (union ib_flow_spec *)ib_flow;
1451
1452                        /* same layer but different type */
1453                        if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1454                             (pdefault_rules->mandatory_fields[k] &
1455                              IB_FLOW_SPEC_LAYER_MASK)) &&
1456                            (current_flow->type !=
1457                             pdefault_rules->mandatory_fields[k]))
1458                                goto out;
1459
1460                        /* same layer, try to match the next one */
1461                        if (current_flow->type ==
1462                            pdefault_rules->mandatory_fields[k]) {
1463                                j++;
1464                                ib_flow +=
1465                                        ((union ib_flow_spec *)ib_flow)->size;
1466                        }
1467                }
1468
1469                ib_flow = flow_attr + 1;
1470                for (j = 0; j < flow_attr->num_of_specs;
1471                     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1472                        for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1473                                /* same layer and same type */
1474                                if (((union ib_flow_spec *)ib_flow)->type ==
1475                                    pdefault_rules->mandatory_not_fields[k])
1476                                        goto out;
1477
1478                return i;
1479        }
1480out:
1481        return -1;
1482}
1483
1484static int __mlx4_ib_create_default_rules(
1485                struct mlx4_ib_dev *mdev,
1486                struct ib_qp *qp,
1487                const struct default_rules *pdefault_rules,
1488                struct _rule_hw *mlx4_spec) {
1489        int size = 0;
1490        int i;
1491
1492        for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1493                union ib_flow_spec ib_spec = {};
1494                int ret;
1495
1496                switch (pdefault_rules->rules_create_list[i]) {
1497                case 0:
1498                        /* no rule */
1499                        continue;
1500                case IB_FLOW_SPEC_IB:
1501                        ib_spec.type = IB_FLOW_SPEC_IB;
1502                        ib_spec.size = sizeof(struct ib_flow_spec_ib);
1503
1504                        break;
1505                default:
1506                        /* invalid rule */
1507                        return -EINVAL;
1508                }
1509                /* We must put an empty rule; the qpn is ignored */
1510                ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1511                                      mlx4_spec);
1512                if (ret < 0) {
1513                        pr_info("invalid parsing\n");
1514                        return -EINVAL;
1515                }
1516
1517                mlx4_spec = (void *)mlx4_spec + ret;
1518                size += ret;
1519        }
1520        return size;
1521}
1522
1523static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1524                          int domain,
1525                          enum mlx4_net_trans_promisc_mode flow_type,
1526                          u64 *reg_id)
1527{
1528        int ret, i;
1529        int size = 0;
1530        void *ib_flow;
1531        struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1532        struct mlx4_cmd_mailbox *mailbox;
1533        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1534        int default_flow;
1535
1536        static const u16 __mlx4_domain[] = {
1537                [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1538                [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1539                [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1540                [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1541        };
1542
1543        if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1544                pr_err("Invalid priority value %d\n", flow_attr->priority);
1545                return -EINVAL;
1546        }
1547
1548        if (domain >= IB_FLOW_DOMAIN_NUM) {
1549                pr_err("Invalid domain value %d\n", domain);
1550                return -EINVAL;
1551        }
1552
1553        if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1554                return -EINVAL;
1555
1556        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1557        if (IS_ERR(mailbox))
1558                return PTR_ERR(mailbox);
1559        ctrl = mailbox->buf;
1560
1561        ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1562                                 flow_attr->priority);
1563        ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1564        ctrl->port = flow_attr->port;
1565        ctrl->qpn = cpu_to_be32(qp->qp_num);
1566
1567        ib_flow = flow_attr + 1;
1568        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1569        /* Add default flows */
1570        default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1571        if (default_flow >= 0) {
1572                ret = __mlx4_ib_create_default_rules(
1573                                mdev, qp, default_table + default_flow,
1574                                mailbox->buf + size);
1575                if (ret < 0) {
1576                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1577                        return -EINVAL;
1578                }
1579                size += ret;
1580        }
1581        for (i = 0; i < flow_attr->num_of_specs; i++) {
1582                ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1583                                      mailbox->buf + size);
1584                if (ret < 0) {
1585                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1586                        return -EINVAL;
1587                }
1588                ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1589                size += ret;
1590        }
1591
1592        if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1593            flow_attr->num_of_specs == 1) {
1594                struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1595                enum ib_flow_spec_type header_spec =
1596                        ((union ib_flow_spec *)(flow_attr + 1))->type;
1597
1598                if (header_spec == IB_FLOW_SPEC_ETH)
1599                        mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1600        }
1601
1602        ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1603                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1604                           MLX4_CMD_NATIVE);
1605        if (ret == -ENOMEM)
1606                pr_err("mcg table is full. Failed to register network rule.\n");
1607        else if (ret == -ENXIO)
1608                pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1609        else if (ret)
1610                pr_err("Invalid argument. Failed to register network rule.\n");
1611
1612        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1613        return ret;
1614}
1615
1616static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1617{
1618        int err;
1619        err = mlx4_cmd(dev, reg_id, 0, 0,
1620                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1621                       MLX4_CMD_NATIVE);
1622        if (err)
1623                pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1624                       reg_id);
1625        return err;
1626}
1627
1628static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1629                                    u64 *reg_id)
1630{
1631        void *ib_flow;
1632        union ib_flow_spec *ib_spec;
1633        struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1634        int err = 0;
1635
1636        if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1637            dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1638                return 0; /* do nothing */
1639
1640        ib_flow = flow_attr + 1;
1641        ib_spec = (union ib_flow_spec *)ib_flow;
1642
1643        if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1644                return 0; /* do nothing */
1645
1646        err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1647                                    flow_attr->port, qp->qp_num,
1648                                    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1649                                    reg_id);
1650        return err;
1651}
1652
1653static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1654                                      struct ib_flow_attr *flow_attr,
1655                                      enum mlx4_net_trans_promisc_mode *type)
1656{
1657        int err = 0;
1658
1659        if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1660            (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1661            (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1662                return -EOPNOTSUPP;
1663        }
1664
1665        if (flow_attr->num_of_specs == 0) {
1666                type[0] = MLX4_FS_MC_SNIFFER;
1667                type[1] = MLX4_FS_UC_SNIFFER;
1668        } else {
1669                union ib_flow_spec *ib_spec;
1670
1671                ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1672                if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
1673                        return -EINVAL;
1674
1675                /* if the mask is all zeros, sniff both MC and UC */
1676                if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1677                        type[0] = MLX4_FS_MC_SNIFFER;
1678                        type[1] = MLX4_FS_UC_SNIFFER;
1679                } else {
1680                        u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1681                                            ib_spec->eth.mask.dst_mac[1],
1682                                            ib_spec->eth.mask.dst_mac[2],
1683                                            ib_spec->eth.mask.dst_mac[3],
1684                                            ib_spec->eth.mask.dst_mac[4],
1685                                            ib_spec->eth.mask.dst_mac[5]};
1686
1687                        /* The XOR above only flipped the MC bit; a non-empty mask
1688                         * is valid only if that bit is set and the rest are zero.
1689                         */
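                            /* e.g. a dst_mac mask of 01:00:00:00:00:00 is
                             * accepted and selects the MC or UC sniffer based
                             * on the MC bit of the dst_mac value, while a mask
                             * of ff:ff:ff:ff:ff:ff is rejected with -EINVAL.
                             */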
1690                        if (!is_zero_ether_addr(&mac[0]))
1691                                return -EINVAL;
1692
1693                        if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1694                                type[0] = MLX4_FS_MC_SNIFFER;
1695                        else
1696                                type[0] = MLX4_FS_UC_SNIFFER;
1697                }
1698        }
1699
1700        return err;
1701}
1702
1703static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1704                                    struct ib_flow_attr *flow_attr,
1705                                    int domain, struct ib_udata *udata)
1706{
1707        int err = 0, i = 0, j = 0;
1708        struct mlx4_ib_flow *mflow;
1709        enum mlx4_net_trans_promisc_mode type[2];
1710        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1711        int is_bonded = mlx4_is_bonded(dev);
1712
1713        if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1714                return ERR_PTR(-EINVAL);
1715
1716        if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1717                return ERR_PTR(-EOPNOTSUPP);
1718
1719        if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1720            (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1721                return ERR_PTR(-EOPNOTSUPP);
1722
1723        if (udata &&
1724            udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1725                return ERR_PTR(-EOPNOTSUPP);
1726
1727        memset(type, 0, sizeof(type));
1728
1729        mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1730        if (!mflow) {
1731                err = -ENOMEM;
1732                goto err_free;
1733        }
1734
1735        switch (flow_attr->type) {
1736        case IB_FLOW_ATTR_NORMAL:
1737                /* If the don't trap flag (continue match) is set, then under
1738                 * specific conditions traffic is replicated to the given qp
1739                 * without being stolen from its normal destination
1740                 */
1741                if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1742                        err = mlx4_ib_add_dont_trap_rule(dev,
1743                                                         flow_attr,
1744                                                         type);
1745                        if (err)
1746                                goto err_free;
1747                } else {
1748                        type[0] = MLX4_FS_REGULAR;
1749                }
1750                break;
1751
1752        case IB_FLOW_ATTR_ALL_DEFAULT:
1753                type[0] = MLX4_FS_ALL_DEFAULT;
1754                break;
1755
1756        case IB_FLOW_ATTR_MC_DEFAULT:
1757                type[0] = MLX4_FS_MC_DEFAULT;
1758                break;
1759
1760        case IB_FLOW_ATTR_SNIFFER:
1761                type[0] = MLX4_FS_MIRROR_RX_PORT;
1762                type[1] = MLX4_FS_MIRROR_SX_PORT;
1763                break;
1764
1765        default:
1766                err = -EINVAL;
1767                goto err_free;
1768        }
1769
1770        while (i < ARRAY_SIZE(type) && type[i]) {
1771                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1772                                            &mflow->reg_id[i].id);
1773                if (err)
1774                        goto err_create_flow;
1775                if (is_bonded) {
1776                        /* The application always sees one port, so the mirror
1777                         * rule must be installed on port #2
1778                         */
1779                        flow_attr->port = 2;
1780                        err = __mlx4_ib_create_flow(qp, flow_attr,
1781                                                    domain, type[j],
1782                                                    &mflow->reg_id[j].mirror);
1783                        flow_attr->port = 1;
1784                        if (err)
1785                                goto err_create_flow;
1786                        j++;
1787                }
1788
1789                i++;
1790        }
1791
1792        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1793                err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1794                                               &mflow->reg_id[i].id);
1795                if (err)
1796                        goto err_create_flow;
1797
1798                if (is_bonded) {
1799                        flow_attr->port = 2;
1800                        err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1801                                                       &mflow->reg_id[j].mirror);
1802                        flow_attr->port = 1;
1803                        if (err)
1804                                goto err_create_flow;
1805                        j++;
1806                }
1807                /* function to create mirror rule */
1808                i++;
1809        }
1810
1811        return &mflow->ibflow;
1812
1813err_create_flow:
1814        while (i) {
1815                i--;
1816                (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1817                                             mflow->reg_id[i].id);
1818        }
1819
1820        while (j) {
1821                j--;
1822                (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1823                                             mflow->reg_id[j].mirror);
1824        }
1825err_free:
1826        kfree(mflow);
1827        return ERR_PTR(err);
1828}
1829
1830static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1831{
1832        int err, ret = 0;
1833        int i = 0;
1834        struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1835        struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1836
1837        while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1838                err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1839                if (err)
1840                        ret = err;
1841                if (mflow->reg_id[i].mirror) {
1842                        err = __mlx4_ib_destroy_flow(mdev->dev,
1843                                                     mflow->reg_id[i].mirror);
1844                        if (err)
1845                                ret = err;
1846                }
1847                i++;
1848        }
1849
1850        kfree(mflow);
1851        return ret;
1852}
1853
1854static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1855{
1856        int err;
1857        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1858        struct mlx4_dev *dev = mdev->dev;
1859        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1860        struct mlx4_ib_steering *ib_steering = NULL;
1861        enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1862        struct mlx4_flow_reg_id reg_id;
1863
1864        if (mdev->dev->caps.steering_mode ==
1865            MLX4_STEERING_MODE_DEVICE_MANAGED) {
1866                ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1867                if (!ib_steering)
1868                        return -ENOMEM;
1869        }
1870
1871        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1872                                    !!(mqp->flags &
1873                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1874                                    prot, &reg_id.id);
1875        if (err) {
1876                pr_err("multicast attach op failed, err %d\n", err);
1877                goto err_malloc;
1878        }
1879
1880        reg_id.mirror = 0;
1881        if (mlx4_is_bonded(dev)) {
1882                err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1883                                            (mqp->port == 1) ? 2 : 1,
1884                                            !!(mqp->flags &
1885                                            MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1886                                            prot, &reg_id.mirror);
1887                if (err)
1888                        goto err_add;
1889        }
1890
1891        err = add_gid_entry(ibqp, gid);
1892        if (err)
1893                goto err_add;
1894
1895        if (ib_steering) {
1896                memcpy(ib_steering->gid.raw, gid->raw, 16);
1897                ib_steering->reg_id = reg_id;
1898                mutex_lock(&mqp->mutex);
1899                list_add(&ib_steering->list, &mqp->steering_rules);
1900                mutex_unlock(&mqp->mutex);
1901        }
1902        return 0;
1903
1904err_add:
1905        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1906                              prot, reg_id.id);
1907        if (reg_id.mirror)
1908                mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1909                                      prot, reg_id.mirror);
1910err_malloc:
1911        kfree(ib_steering);
1912
1913        return err;
1914}
1915
1916static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1917{
1918        struct mlx4_ib_gid_entry *ge;
1919        struct mlx4_ib_gid_entry *tmp;
1920        struct mlx4_ib_gid_entry *ret = NULL;
1921
1922        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1923                if (!memcmp(raw, ge->gid.raw, 16)) {
1924                        ret = ge;
1925                        break;
1926                }
1927        }
1928
1929        return ret;
1930}
1931
1932static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1933{
1934        int err;
1935        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1936        struct mlx4_dev *dev = mdev->dev;
1937        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1938        struct net_device *ndev;
1939        struct mlx4_ib_gid_entry *ge;
1940        struct mlx4_flow_reg_id reg_id = {0, 0};
1941        enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
1942
1943        if (mdev->dev->caps.steering_mode ==
1944            MLX4_STEERING_MODE_DEVICE_MANAGED) {
1945                struct mlx4_ib_steering *ib_steering;
1946
1947                mutex_lock(&mqp->mutex);
1948                list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1949                        if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1950                                list_del(&ib_steering->list);
1951                                break;
1952                        }
1953                }
1954                mutex_unlock(&mqp->mutex);
1955                if (&ib_steering->list == &mqp->steering_rules) {
1956                        pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1957                        return -EINVAL;
1958                }
1959                reg_id = ib_steering->reg_id;
1960                kfree(ib_steering);
1961        }
1962
1963        err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1964                                    prot, reg_id.id);
1965        if (err)
1966                return err;
1967
1968        if (mlx4_is_bonded(dev)) {
1969                err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1970                                            prot, reg_id.mirror);
1971                if (err)
1972                        return err;
1973        }
1974
1975        mutex_lock(&mqp->mutex);
1976        ge = find_gid_entry(mqp, gid->raw);
1977        if (ge) {
1978                spin_lock_bh(&mdev->iboe.lock);
1979                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1980                if (ndev)
1981                        dev_hold(ndev);
1982                spin_unlock_bh(&mdev->iboe.lock);
1983                if (ndev)
1984                        dev_put(ndev);
1985                list_del(&ge->list);
1986                kfree(ge);
1987        } else
1988                pr_warn("could not find mgid entry\n");
1989
1990        mutex_unlock(&mqp->mutex);
1991
1992        return 0;
1993}
1994
1995static int init_node_data(struct mlx4_ib_dev *dev)
1996{
1997        struct ib_smp *in_mad  = NULL;
1998        struct ib_smp *out_mad = NULL;
1999        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
2000        int err = -ENOMEM;
2001
2002        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
2003        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
2004        if (!in_mad || !out_mad)
2005                goto out;
2006
2007        init_query_mad(in_mad);
2008        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
2009        if (mlx4_is_master(dev->dev))
2010                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
2011
2012        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2013        if (err)
2014                goto out;
2015
2016        memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2017
2018        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2019
2020        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2021        if (err)
2022                goto out;
2023
2024        dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2025        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2026
2027out:
2028        kfree(in_mad);
2029        kfree(out_mad);
2030        return err;
2031}
2032
2033static ssize_t hca_type_show(struct device *device,
2034                             struct device_attribute *attr, char *buf)
2035{
2036        struct mlx4_ib_dev *dev =
2037                rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2038        return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
2039}
2040static DEVICE_ATTR_RO(hca_type);
2041
2042static ssize_t hw_rev_show(struct device *device,
2043                           struct device_attribute *attr, char *buf)
2044{
2045        struct mlx4_ib_dev *dev =
2046                rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2047        return sprintf(buf, "%x\n", dev->dev->rev_id);
2048}
2049static DEVICE_ATTR_RO(hw_rev);
2050
2051static ssize_t board_id_show(struct device *device,
2052                             struct device_attribute *attr, char *buf)
2053{
2054        struct mlx4_ib_dev *dev =
2055                rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2056
2057        return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2058                       dev->dev->board_id);
2059}
2060static DEVICE_ATTR_RO(board_id);
2061
2062static struct attribute *mlx4_class_attributes[] = {
2063        &dev_attr_hw_rev.attr,
2064        &dev_attr_hca_type.attr,
2065        &dev_attr_board_id.attr,
2066        NULL
2067};
2068
2069static const struct attribute_group mlx4_attr_group = {
2070        .attrs = mlx4_class_attributes,
2071};
2072
2073struct diag_counter {
2074        const char *name;
2075        u32 offset;
2076};
2077
2078#define DIAG_COUNTER(_name, _offset)                    \
2079        { .name = #_name, .offset = _offset }
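    /*
     * e.g. DIAG_COUNTER(rq_num_lle, 0x00) expands to
     *      { .name = "rq_num_lle", .offset = 0x00 },
     * i.e. the counter name exposed through rdma_hw_stats and the offset
     * handed to mlx4_query_diag_counters().
     */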
2080
2081static const struct diag_counter diag_basic[] = {
2082        DIAG_COUNTER(rq_num_lle, 0x00),
2083        DIAG_COUNTER(sq_num_lle, 0x04),
2084        DIAG_COUNTER(rq_num_lqpoe, 0x08),
2085        DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2086        DIAG_COUNTER(rq_num_lpe, 0x18),
2087        DIAG_COUNTER(sq_num_lpe, 0x1C),
2088        DIAG_COUNTER(rq_num_wrfe, 0x20),
2089        DIAG_COUNTER(sq_num_wrfe, 0x24),
2090        DIAG_COUNTER(sq_num_mwbe, 0x2C),
2091        DIAG_COUNTER(sq_num_bre, 0x34),
2092        DIAG_COUNTER(sq_num_rire, 0x44),
2093        DIAG_COUNTER(rq_num_rire, 0x48),
2094        DIAG_COUNTER(sq_num_rae, 0x4C),
2095        DIAG_COUNTER(rq_num_rae, 0x50),
2096        DIAG_COUNTER(sq_num_roe, 0x54),
2097        DIAG_COUNTER(sq_num_tree, 0x5C),
2098        DIAG_COUNTER(sq_num_rree, 0x64),
2099        DIAG_COUNTER(rq_num_rnr, 0x68),
2100        DIAG_COUNTER(sq_num_rnr, 0x6C),
2101        DIAG_COUNTER(rq_num_oos, 0x100),
2102        DIAG_COUNTER(sq_num_oos, 0x104),
2103};
2104
2105static const struct diag_counter diag_ext[] = {
2106        DIAG_COUNTER(rq_num_dup, 0x130),
2107        DIAG_COUNTER(sq_num_to, 0x134),
2108};
2109
2110static const struct diag_counter diag_device_only[] = {
2111        DIAG_COUNTER(num_cqovf, 0x1A0),
2112        DIAG_COUNTER(rq_num_udsdprd, 0x118),
2113};
2114
2115static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2116                                                    u8 port_num)
2117{
2118        struct mlx4_ib_dev *dev = to_mdev(ibdev);
2119        struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2120
2121        if (!diag[!!port_num].name)
2122                return NULL;
2123
2124        return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2125                                          diag[!!port_num].num_counters,
2126                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
2127}
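    /*
     * Note: dev->diag_counters[] holds two descriptor sets: index 0
     * describes the device-wide counters and index 1 the per-port counters
     * (only built when MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT is set), which is
     * why the stats callbacks index it with !!port_num.
     */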
2128
2129static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2130                                struct rdma_hw_stats *stats,
2131                                u8 port, int index)
2132{
2133        struct mlx4_ib_dev *dev = to_mdev(ibdev);
2134        struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2135        u32 hw_value[ARRAY_SIZE(diag_device_only) +
2136                ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2137        int ret;
2138        int i;
2139
2140        ret = mlx4_query_diag_counters(dev->dev,
2141                                       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2142                                       diag[!!port].offset, hw_value,
2143                                       diag[!!port].num_counters, port);
2144
2145        if (ret)
2146                return ret;
2147
2148        for (i = 0; i < diag[!!port].num_counters; i++)
2149                stats->value[i] = hw_value[i];
2150
2151        return diag[!!port].num_counters;
2152}
2153
2154static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2155                                         const char ***name,
2156                                         u32 **offset,
2157                                         u32 *num,
2158                                         bool port)
2159{
2160        u32 num_counters;
2161
2162        num_counters = ARRAY_SIZE(diag_basic);
2163
2164        if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2165                num_counters += ARRAY_SIZE(diag_ext);
2166
2167        if (!port)
2168                num_counters += ARRAY_SIZE(diag_device_only);
2169
2170        *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2171        if (!*name)
2172                return -ENOMEM;
2173
2174        *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2175        if (!*offset)
2176                goto err_name;
2177
2178        *num = num_counters;
2179
2180        return 0;
2181
2182err_name:
2183        kfree(*name);
2184        return -ENOMEM;
2185}
2186
2187static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2188                                       const char **name,
2189                                       u32 *offset,
2190                                       bool port)
2191{
2192        int i;
2193        int j;
2194
2195        for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2196                name[i] = diag_basic[i].name;
2197                offset[i] = diag_basic[i].offset;
2198        }
2199
2200        if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2201                for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2202                        name[j] = diag_ext[i].name;
2203                        offset[j] = diag_ext[i].offset;
2204                }
2205        }
2206
2207        if (!port) {
2208                for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2209                        name[j] = diag_device_only[i].name;
2210                        offset[j] = diag_device_only[i].offset;
2211                }
2212        }
2213}
2214
2215static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2216        .alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2217        .get_hw_stats = mlx4_ib_get_hw_stats,
2218};
2219
2220static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2221{
2222        struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2223        int i;
2224        int ret;
2225        bool per_port = !!(ibdev->dev->caps.flags2 &
2226                MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2227
2228        if (mlx4_is_slave(ibdev->dev))
2229                return 0;
2230
2231        for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2232                /* i == 1 means we are building port counters */
2233                if (i && !per_port)
2234                        continue;
2235
2236                ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2237                                                    &diag[i].offset,
2238                                                    &diag[i].num_counters, i);
2239                if (ret)
2240                        goto err_alloc;
2241
2242                mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2243                                           diag[i].offset, i);
2244        }
2245
2246        ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2247
2248        return 0;
2249
2250err_alloc:
2251        if (i) {
2252                kfree(diag[i - 1].name);
2253                kfree(diag[i - 1].offset);
2254        }
2255
2256        return ret;
2257}
2258
2259static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2260{
2261        int i;
2262
2263        for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2264                kfree(ibdev->diag_counters[i].offset);
2265                kfree(ibdev->diag_counters[i].name);
2266        }
2267}
2268
2269#define MLX4_IB_INVALID_MAC     ((u64)-1)
2270static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2271                               struct net_device *dev,
2272                               int port)
2273{
2274        u64 new_smac = 0;
2275        u64 release_mac = MLX4_IB_INVALID_MAC;
2276        struct mlx4_ib_qp *qp;
2277
2278        read_lock(&dev_base_lock);
2279        new_smac = mlx4_mac_to_u64(dev->dev_addr);
2280        read_unlock(&dev_base_lock);
2281
2282        atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2283
2284        /* no need to update QP1 or register a MAC in non-SRIOV */
2285        if (!mlx4_is_mfunc(ibdev->dev))
2286                return;
2287
2288        mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2289        qp = ibdev->qp1_proxy[port - 1];
2290        if (qp) {
2291                int new_smac_index;
2292                u64 old_smac;
2293                struct mlx4_update_qp_params update_params;
2294
2295                mutex_lock(&qp->mutex);
2296                old_smac = qp->pri.smac;
2297                if (new_smac == old_smac)
2298                        goto unlock;
2299
2300                new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2301
2302                if (new_smac_index < 0)
2303                        goto unlock;
2304
2305                update_params.smac_index = new_smac_index;
2306                if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2307                                   &update_params)) {
2308                        release_mac = new_smac;
2309                        goto unlock;
2310                }
2311                /* if the old port was zero, no MAC was registered yet for this QP */
2312                if (qp->pri.smac_port)
2313                        release_mac = old_smac;
2314                qp->pri.smac = new_smac;
2315                qp->pri.smac_port = port;
2316                qp->pri.smac_index = new_smac_index;
2317        }
2318
2319unlock:
2320        if (release_mac != MLX4_IB_INVALID_MAC)
2321                mlx4_unregister_mac(ibdev->dev, port, release_mac);
2322        if (qp)
2323                mutex_unlock(&qp->mutex);
2324        mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2325}
2326
2327static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2328                                 struct net_device *dev,
2329                                 unsigned long event)
2330
2331{
2332        struct mlx4_ib_iboe *iboe;
2333        int update_qps_port = -1;
2334        int port;
2335
2336        ASSERT_RTNL();
2337
2338        iboe = &ibdev->iboe;
2339
2340        spin_lock_bh(&iboe->lock);
2341        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2342
2343                iboe->netdevs[port - 1] =
2344                        mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2345
2346                if (dev == iboe->netdevs[port - 1] &&
2347                    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2348                     event == NETDEV_UP || event == NETDEV_CHANGE))
2349                        update_qps_port = port;
2350
2351                if (dev == iboe->netdevs[port - 1] &&
2352                    (event == NETDEV_UP || event == NETDEV_DOWN)) {
2353                        enum ib_port_state port_state;
2354                        struct ib_event ibev = { };
2355
2356                        if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2357                                                     &port_state))
2358                                continue;
2359
2360                        if (event == NETDEV_UP &&
2361                            (port_state != IB_PORT_ACTIVE ||
2362                             iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2363                                continue;
2364                        if (event == NETDEV_DOWN &&
2365                            (port_state != IB_PORT_DOWN ||
2366                             iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2367                                continue;
2368                        iboe->last_port_state[port - 1] = port_state;
2369
2370                        ibev.device = &ibdev->ib_dev;
2371                        ibev.element.port_num = port;
2372                        ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2373                                                          IB_EVENT_PORT_ERR;
2374                        ib_dispatch_event(&ibev);
2375                }
2376
2377        }
2378        spin_unlock_bh(&iboe->lock);
2379
2380        if (update_qps_port > 0)
2381                mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2382}
2383
2384static int mlx4_ib_netdev_event(struct notifier_block *this,
2385                                unsigned long event, void *ptr)
2386{
2387        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2388        struct mlx4_ib_dev *ibdev;
2389
2390        if (!net_eq(dev_net(dev), &init_net))
2391                return NOTIFY_DONE;
2392
2393        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2394        mlx4_ib_scan_netdevs(ibdev, dev, event);
2395
2396        return NOTIFY_DONE;
2397}
2398
2399static void init_pkeys(struct mlx4_ib_dev *ibdev)
2400{
2401        int port;
2402        int slave;
2403        int i;
2404
2405        if (mlx4_is_master(ibdev->dev)) {
2406                for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2407                     ++slave) {
2408                        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2409                                for (i = 0;
2410                                     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2411                                     ++i) {
2412                                        ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2413                                        /* master has the identity virt2phys pkey mapping */
2414                                                (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2415                                                        ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2416                                        mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2417                                                             ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2418                                }
2419                        }
2420                }
2421                /* initialize pkey cache */
2422                for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2423                        for (i = 0;
2424                             i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2425                             ++i)
2426                                ibdev->pkeys.phys_pkey_cache[port-1][i] =
2427                                        (i) ? 0 : 0xFFFF;
2428                }
2429        }
2430}
2431
2432static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2433{
2434        int i, j, eq = 0, total_eqs = 0;
2435
2436        ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2437                                  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2438        if (!ibdev->eq_table)
2439                return;
2440
2441        for (i = 1; i <= dev->caps.num_ports; i++) {
2442                for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2443                     j++, total_eqs++) {
2444                        if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
2445                                continue;
2446                        ibdev->eq_table[eq] = total_eqs;
2447                        if (!mlx4_assign_eq(dev, i,
2448                                            &ibdev->eq_table[eq]))
2449                                eq++;
2450                        else
2451                                ibdev->eq_table[eq] = -1;
2452                }
2453        }
2454
2455        for (i = eq; i < dev->caps.num_comp_vectors;
2456             ibdev->eq_table[i++] = -1)
2457                ;
2458
2459        /* Advertise the new number of EQs to clients */
2460        ibdev->ib_dev.num_comp_vectors = eq;
2461}
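    /*
     * On return from mlx4_ib_alloc_eqs(), ibdev->eq_table[] holds the EQ
     * numbers that were actually assigned (EQs shared between ports are
     * only counted for the first port), unused slots are set to -1, and
     * ib_dev.num_comp_vectors is shrunk to the number of EQs obtained.
     */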
2462
2463static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2464{
2465        int i;
2466        int total_eqs = ibdev->ib_dev.num_comp_vectors;
2467
2468        /* no eqs were allocated */
2469        if (!ibdev->eq_table)
2470                return;
2471
2472        /* Reset the advertised EQ number */
2473        ibdev->ib_dev.num_comp_vectors = 0;
2474
2475        for (i = 0; i < total_eqs; i++)
2476                mlx4_release_eq(dev, ibdev->eq_table[i]);
2477
2478        kfree(ibdev->eq_table);
2479        ibdev->eq_table = NULL;
2480}
2481
2482static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2483                               struct ib_port_immutable *immutable)
2484{
2485        struct ib_port_attr attr;
2486        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2487        int err;
2488
2489        if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2490                immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2491                immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2492        } else {
2493                if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2494                        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2495                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2496                        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2497                                RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2498                immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2499                if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2500                    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2501                        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2502        }
2503
2504        err = ib_query_port(ibdev, port_num, &attr);
2505        if (err)
2506                return err;
2507
2508        immutable->pkey_tbl_len = attr.pkey_tbl_len;
2509        immutable->gid_tbl_len = attr.gid_tbl_len;
2510
2511        return 0;
2512}
2513
2514static void get_fw_ver_str(struct ib_device *device, char *str)
2515{
2516        struct mlx4_ib_dev *dev =
2517                container_of(device, struct mlx4_ib_dev, ib_dev);
2518        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2519                 (int) (dev->dev->caps.fw_ver >> 32),
2520                 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2521                 (int) dev->dev->caps.fw_ver & 0xffff);
2522}
2523
2524static const struct ib_device_ops mlx4_ib_dev_ops = {
2525        .owner = THIS_MODULE,
2526        .driver_id = RDMA_DRIVER_MLX4,
2527        .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2528
2529        .add_gid = mlx4_ib_add_gid,
2530        .alloc_mr = mlx4_ib_alloc_mr,
2531        .alloc_pd = mlx4_ib_alloc_pd,
2532        .alloc_ucontext = mlx4_ib_alloc_ucontext,
2533        .attach_mcast = mlx4_ib_mcg_attach,
2534        .create_ah = mlx4_ib_create_ah,
2535        .create_cq = mlx4_ib_create_cq,
2536        .create_qp = mlx4_ib_create_qp,
2537        .create_srq = mlx4_ib_create_srq,
2538        .dealloc_pd = mlx4_ib_dealloc_pd,
2539        .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2540        .del_gid = mlx4_ib_del_gid,
2541        .dereg_mr = mlx4_ib_dereg_mr,
2542        .destroy_ah = mlx4_ib_destroy_ah,
2543        .destroy_cq = mlx4_ib_destroy_cq,
2544        .destroy_qp = mlx4_ib_destroy_qp,
2545        .destroy_srq = mlx4_ib_destroy_srq,
2546        .detach_mcast = mlx4_ib_mcg_detach,
2547        .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2548        .drain_rq = mlx4_ib_drain_rq,
2549        .drain_sq = mlx4_ib_drain_sq,
2550        .get_dev_fw_str = get_fw_ver_str,
2551        .get_dma_mr = mlx4_ib_get_dma_mr,
2552        .get_link_layer = mlx4_ib_port_link_layer,
2553        .get_netdev = mlx4_ib_get_netdev,
2554        .get_port_immutable = mlx4_port_immutable,
2555        .map_mr_sg = mlx4_ib_map_mr_sg,
2556        .mmap = mlx4_ib_mmap,
2557        .modify_cq = mlx4_ib_modify_cq,
2558        .modify_device = mlx4_ib_modify_device,
2559        .modify_port = mlx4_ib_modify_port,
2560        .modify_qp = mlx4_ib_modify_qp,
2561        .modify_srq = mlx4_ib_modify_srq,
2562        .poll_cq = mlx4_ib_poll_cq,
2563        .post_recv = mlx4_ib_post_recv,
2564        .post_send = mlx4_ib_post_send,
2565        .post_srq_recv = mlx4_ib_post_srq_recv,
2566        .process_mad = mlx4_ib_process_mad,
2567        .query_ah = mlx4_ib_query_ah,
2568        .query_device = mlx4_ib_query_device,
2569        .query_gid = mlx4_ib_query_gid,
2570        .query_pkey = mlx4_ib_query_pkey,
2571        .query_port = mlx4_ib_query_port,
2572        .query_qp = mlx4_ib_query_qp,
2573        .query_srq = mlx4_ib_query_srq,
2574        .reg_user_mr = mlx4_ib_reg_user_mr,
2575        .req_notify_cq = mlx4_ib_arm_cq,
2576        .rereg_user_mr = mlx4_ib_rereg_user_mr,
2577        .resize_cq = mlx4_ib_resize_cq,
2578
2579        INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2580        INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2581        INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2582        INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2583        INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2584};
2585
2586static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2587        .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2588        .create_wq = mlx4_ib_create_wq,
2589        .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2590        .destroy_wq = mlx4_ib_destroy_wq,
2591        .modify_wq = mlx4_ib_modify_wq,
2592};
2593
2594static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2595        .alloc_mw = mlx4_ib_alloc_mw,
2596        .dealloc_mw = mlx4_ib_dealloc_mw,
2597};
2598
2599static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2600        .alloc_xrcd = mlx4_ib_alloc_xrcd,
2601        .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2602
2603        INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
2604};
2605
2606static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2607        .create_flow = mlx4_ib_create_flow,
2608        .destroy_flow = mlx4_ib_destroy_flow,
2609};
2610
2611static void *mlx4_ib_add(struct mlx4_dev *dev)
2612{
2613        struct mlx4_ib_dev *ibdev;
2614        int num_ports = 0;
2615        int i, j;
2616        int err;
2617        struct mlx4_ib_iboe *iboe;
2618        int ib_num_ports = 0;
2619        int num_req_counters;
2620        int allocated;
2621        u32 counter_index;
2622        struct counter_index *new_counter_index = NULL;
2623
2624        pr_info_once("%s", mlx4_ib_version);
2625
2626        num_ports = 0;
2627        mlx4_foreach_ib_transport_port(i, dev)
2628                num_ports++;
2629
2630        /* No point in registering a device with no ports... */
2631        if (num_ports == 0)
2632                return NULL;
2633
2634        ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2635        if (!ibdev) {
2636                dev_err(&dev->persist->pdev->dev,
2637                        "Device struct alloc failed\n");
2638                return NULL;
2639        }
2640
2641        iboe = &ibdev->iboe;
2642
2643        if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2644                goto err_dealloc;
2645
2646        if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2647                goto err_pd;
2648
2649        ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2650                                 PAGE_SIZE);
2651        if (!ibdev->uar_map)
2652                goto err_uar;
2653        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2654
2655        ibdev->dev = dev;
2656        ibdev->bond_next_port   = 0;
2657
2658        ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
2659        ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
2660        ibdev->num_ports                = num_ports;
2661        ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2662                                                1 : ibdev->num_ports;
2663        ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
2664        ibdev->ib_dev.dev.parent        = &dev->persist->pdev->dev;
2665
2666        ibdev->ib_dev.uverbs_cmd_mask   =
2667                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
2668                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
2669                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
2670                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
2671                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
2672                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
2673                (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
2674                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
2675                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2676                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
2677                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
2678                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
2679                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
2680                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
2681                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
2682                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
2683                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
2684                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
2685                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
2686                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
2687                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
2688                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
2689                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
2690                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2691
2692        ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2693        ibdev->ib_dev.uverbs_ex_cmd_mask |=
2694                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
2695                (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2696                (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2697                (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2698
2699        if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2700            ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2701            IB_LINK_LAYER_ETHERNET) ||
2702            (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2703            IB_LINK_LAYER_ETHERNET))) {
2704                ibdev->ib_dev.uverbs_ex_cmd_mask |=
2705                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ)          |
2706                        (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ)          |
2707                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ)         |
2708                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2709                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
2710                ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2711        }
2712
2713        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2714            dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2715                ibdev->ib_dev.uverbs_cmd_mask |=
2716                        (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2717                        (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2718                ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2719        }
2720
2721        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2722                ibdev->ib_dev.uverbs_cmd_mask |=
2723                        (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2724                        (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2725                ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2726        }
2727
2728        if (check_flow_steering_support(dev)) {
2729                ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2730                ibdev->ib_dev.uverbs_ex_cmd_mask        |=
2731                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2732                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2733                ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2734        }
2735
2736        if (!dev->caps.userspace_caps)
2737                ibdev->ib_dev.ops.uverbs_abi_ver =
2738                        MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2739
2740        mlx4_ib_alloc_eqs(dev, ibdev);
2741
2742        spin_lock_init(&iboe->lock);
2743
2744        if (init_node_data(ibdev))
2745                goto err_map;
2746        mlx4_init_sl2vl_tbl(ibdev);
2747
2748        for (i = 0; i < ibdev->num_ports; ++i) {
2749                mutex_init(&ibdev->counters_table[i].mutex);
2750                INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2751                iboe->last_port_state[i] = IB_PORT_DOWN;
2752        }
2753
2754        num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2755        for (i = 0; i < num_req_counters; ++i) {
2756                mutex_init(&ibdev->qp1_proxy_lock[i]);
2757                allocated = 0;
2758                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2759                                                IB_LINK_LAYER_ETHERNET) {
2760                        err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2761                                                 MLX4_RES_USAGE_DRIVER);
2762                        /* if allocating a new counter failed, use the default */
2763                        if (err)
2764                                counter_index =
2765                                        mlx4_get_default_counter_index(dev,
2766                                                                       i + 1);
2767                        else
2768                                allocated = 1;
2769                } else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2770                        counter_index = mlx4_get_default_counter_index(dev,
2771                                                                       i + 1);
2772                }
2773                new_counter_index = kmalloc(sizeof(*new_counter_index),
2774                                            GFP_KERNEL);
2775                if (!new_counter_index) {
2776                        if (allocated)
2777                                mlx4_counter_free(ibdev->dev, counter_index);
2778                        goto err_counter;
2779                }
2780                new_counter_index->index = counter_index;
2781                new_counter_index->allocated = allocated;
2782                list_add_tail(&new_counter_index->list,
2783                              &ibdev->counters_table[i].counters_list);
2784                ibdev->counters_table[i].default_counter = counter_index;
2785                pr_info("counter index %d for port %d (allocated=%d)\n",
2786                        counter_index, i + 1, allocated);
2787        }
2788        if (mlx4_is_bonded(dev))
2789                for (i = 1; i < ibdev->num_ports ; ++i) {
2790                        new_counter_index =
2791                                        kmalloc(sizeof(*new_counter_index),
2792                                                GFP_KERNEL);
2793                        if (!new_counter_index)
2794                                goto err_counter;
2795                        new_counter_index->index = counter_index;
2796                        new_counter_index->allocated = 0;
2797                        list_add_tail(&new_counter_index->list,
2798                                      &ibdev->counters_table[i].counters_list);
2799                        ibdev->counters_table[i].default_counter =
2800                                                                counter_index;
2801                }
2802
2803        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2804                ib_num_ports++;
2805
2806        spin_lock_init(&ibdev->sm_lock);
2807        mutex_init(&ibdev->cap_mask_mutex);
2808        INIT_LIST_HEAD(&ibdev->qp_list);
2809        spin_lock_init(&ibdev->reset_flow_resource_lock);
2810
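        /*
         * With device-managed flow steering, reserve a block of QP numbers
         * for IB unicast steering rules and track them in a bitmap.  If the
         * device supports DMFS for IPoIB, the range is also registered with
         * the firmware; otherwise the bitmap is marked fully used so no
         * steering QPNs are handed out.
         */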
2811        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2812            ib_num_ports) {
2813                ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2814                err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2815                                            MLX4_IB_UC_STEER_QPN_ALIGN,
2816                                            &ibdev->steer_qpn_base, 0,
2817                                            MLX4_RES_USAGE_DRIVER);
2818                if (err)
2819                        goto err_counter;
2820
2821                ibdev->ib_uc_qpns_bitmap =
2822                        kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2823                                      sizeof(long),
2824                                      GFP_KERNEL);
2825                if (!ibdev->ib_uc_qpns_bitmap)
2826                        goto err_steer_qp_release;
2827
2828                if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2829                        bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2830                                    ibdev->steer_qpn_count);
2831                        err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2832                                        dev, ibdev->steer_qpn_base,
2833                                        ibdev->steer_qpn_base +
2834                                        ibdev->steer_qpn_count - 1);
2835                        if (err)
2836                                goto err_steer_free_bitmap;
2837                } else {
2838                        bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2839                                    ibdev->steer_qpn_count);
2840                }
2841        }
2842
2843        for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2844                atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2845
2846        if (mlx4_ib_alloc_diag_counters(ibdev))
2847                goto err_steer_free_bitmap;
2848
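        /*
         * Final bring-up: register the ib_device under the mlx4_%d naming
         * scheme, start the MAD and SR-IOV machinery, hook the netdevice
         * notifier so Ethernet (RoCE) link changes are tracked, and program
         * the RoCE v2 UDP destination port when the device supports it.
         */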
2849        rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
2850        if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
2851                goto err_diag_counters;
2852
2853        if (mlx4_ib_mad_init(ibdev))
2854                goto err_reg;
2855
2856        if (mlx4_ib_init_sriov(ibdev))
2857                goto err_mad;
2858
2859        if (!iboe->nb.notifier_call) {
2860                iboe->nb.notifier_call = mlx4_ib_netdev_event;
2861                err = register_netdevice_notifier(&iboe->nb);
2862                if (err) {
2863                        iboe->nb.notifier_call = NULL;
2864                        goto err_notif;
2865                }
2866        }
2867        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2868                err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2869                if (err)
2870                        goto err_notif;
2871        }
2872
2873        ibdev->ib_active = true;
2874        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2875                devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2876                                         &ibdev->ib_dev);
2877
2878        if (mlx4_is_mfunc(ibdev->dev))
2879                init_pkeys(ibdev);
2880
2881        /* create paravirt contexts for any VFs which are active */
2882        if (mlx4_is_master(ibdev->dev)) {
2883                for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2884                        if (j == mlx4_master_func_num(ibdev->dev))
2885                                continue;
2886                        if (mlx4_is_slave_active(ibdev->dev, j))
2887                                do_slave_init(ibdev, j, 1);
2888                }
2889        }
2890        return ibdev;
2891
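        /*
         * Error unwind: each label below releases the resources acquired up
         * to the corresponding failure point, in roughly reverse order of
         * acquisition.
         */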
2892err_notif:
2893        if (ibdev->iboe.nb.notifier_call) {
2894                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2895                        pr_warn("failed to unregister netdevice notifier\n");
2896                ibdev->iboe.nb.notifier_call = NULL;
2897        }
2898        flush_workqueue(wq);
2899
2900        mlx4_ib_close_sriov(ibdev);
2901
2902err_mad:
2903        mlx4_ib_mad_cleanup(ibdev);
2904
2905err_reg:
2906        ib_unregister_device(&ibdev->ib_dev);
2907
2908err_diag_counters:
2909        mlx4_ib_diag_cleanup(ibdev);
2910
2911err_steer_free_bitmap:
2912        kfree(ibdev->ib_uc_qpns_bitmap);
2913
2914err_steer_qp_release:
2915        mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2916                              ibdev->steer_qpn_count);
2917err_counter:
2918        for (i = 0; i < ibdev->num_ports; ++i)
2919                mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2920
2921err_map:
2922        mlx4_ib_free_eqs(dev, ibdev);
2923        iounmap(ibdev->uar_map);
2924
2925err_uar:
2926        mlx4_uar_free(dev, &ibdev->priv_uar);
2927
2928err_pd:
2929        mlx4_pd_free(dev, ibdev->priv_pdn);
2930
2931err_dealloc:
2932        ib_dealloc_device(&ibdev->ib_dev);
2933
2934        return NULL;
2935}
2936
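/*
 * Allocate count steering QP numbers from the reserved range.  The region is
 * rounded up to a power of two by get_count_order(), and the returned QPN is
 * steer_qpn_base plus the offset of the free region found in the bitmap.
 */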
2937int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2938{
2939        int offset;
2940
2941        WARN_ON(!dev->ib_uc_qpns_bitmap);
2942
2943        offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2944                                         dev->steer_qpn_count,
2945                                         get_count_order(count));
2946        if (offset < 0)
2947                return offset;
2948
2949        *qpn = dev->steer_qpn_base + offset;
2950        return 0;
2951}
2952
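/*
 * Return a steering QPN region to the bitmap.  A zero qpn, or a device that
 * is not in device-managed steering mode, means there is nothing to free.
 */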
2953void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2954{
2955        if (!qpn ||
2956            dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2957                return;
2958
2959        if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2960                 qpn, dev->steer_qpn_base))
2961                /* not supposed to be here */
2962                return;
2963
2964        bitmap_release_region(dev->ib_uc_qpns_bitmap,
2965                              qpn - dev->steer_qpn_base,
2966                              get_count_order(count));
2967}
2968
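/*
 * Attach or detach a catch-all IB L2 flow rule for the given QP in
 * device-managed steering mode.  On attach, a single ib_flow_spec_ib with an
 * all-zero mask is built so that all IB traffic destined to the QP matches;
 * the resulting rule id is kept in mqp->reg_id for the later detach.
 */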
2969int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2970                         int is_attach)
2971{
2972        int err;
2973        size_t flow_size;
2974        struct ib_flow_attr *flow = NULL;
2975        struct ib_flow_spec_ib *ib_spec;
2976
2977        if (is_attach) {
2978                flow_size = sizeof(struct ib_flow_attr) +
2979                            sizeof(struct ib_flow_spec_ib);
2980                flow = kzalloc(flow_size, GFP_KERNEL);
2981                if (!flow)
2982                        return -ENOMEM;
2983                flow->port = mqp->port;
2984                flow->num_of_specs = 1;
2985                flow->size = flow_size;
2986                ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2987                ib_spec->type = IB_FLOW_SPEC_IB;
2988                ib_spec->size = sizeof(struct ib_flow_spec_ib);
2989                /* Add an empty rule for IB L2 */
2990                memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2991
2992                err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2993                                            IB_FLOW_DOMAIN_NIC,
2994                                            MLX4_FS_REGULAR,
2995                                            &mqp->reg_id);
2996        } else {
2997                err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2998        }
2999        kfree(flow);
3000        return err;
3001}
3002
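/*
 * Tear down the IB device: undo the registration steps of mlx4_ib_add in
 * roughly reverse order, releasing the steering QPN range, per-port counters,
 * EQs, UAR and PD before freeing the ib_device itself.
 */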
3003static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3004{
3005        struct mlx4_ib_dev *ibdev = ibdev_ptr;
3006        int p;
3007        int i;
3008
3009        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3010                devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
3011        ibdev->ib_active = false;
3012        flush_workqueue(wq);
3013
3014        if (ibdev->iboe.nb.notifier_call) {
3015                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
3016                        pr_warn("failed to unregister netdevice notifier\n");
3017                ibdev->iboe.nb.notifier_call = NULL;
3018        }
3019
3020        mlx4_ib_close_sriov(ibdev);
3021        mlx4_ib_mad_cleanup(ibdev);
3022        ib_unregister_device(&ibdev->ib_dev);
3023        mlx4_ib_diag_cleanup(ibdev);
3024
3025        mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3026                              ibdev->steer_qpn_count);
3027        kfree(ibdev->ib_uc_qpns_bitmap);
3028
3029        iounmap(ibdev->uar_map);
3030        for (p = 0; p < ibdev->num_ports; ++p)
3031                mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3032
3033        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
3034                mlx4_CLOSE_PORT(dev, p);
3035
3036        mlx4_ib_free_eqs(dev, ibdev);
3037
3038        mlx4_uar_free(dev, &ibdev->priv_uar);
3039        mlx4_pd_free(dev, ibdev->priv_pdn);
3040        ib_dealloc_device(&ibdev->ib_dev);
3041}
3042
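/*
 * Schedule set-up (or tear-down) of the paravirtualized tunnel QPs for one
 * slave.  Only the master builds the work items; one mlx4_ib_demux_work is
 * queued per active port of that slave, unless SR-IOV teardown is already in
 * progress, in which case the items are freed again without being queued.
 */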
3043static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3044{
3045        struct mlx4_ib_demux_work **dm = NULL;
3046        struct mlx4_dev *dev = ibdev->dev;
3047        int i;
3048        unsigned long flags;
3049        struct mlx4_active_ports actv_ports;
3050        unsigned int ports;
3051        unsigned int first_port;
3052
3053        if (!mlx4_is_master(dev))
3054                return;
3055
3056        actv_ports = mlx4_get_active_ports(dev, slave);
3057        ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3058        first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3059
3060        dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3061        if (!dm)
3062                return;
3063
3064        for (i = 0; i < ports; i++) {
3065                dm[i] = kmalloc(sizeof(*dm[i]), GFP_ATOMIC);
3066                if (!dm[i]) {
3067                        while (--i >= 0)
3068                                kfree(dm[i]);
3069                        goto out;
3070                }
3071                INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3072                dm[i]->port = first_port + i + 1;
3073                dm[i]->slave = slave;
3074                dm[i]->do_init = do_init;
3075                dm[i]->dev = ibdev;
3076        }
3077        /* initialize or tear down tunnel QPs for the slave */
3078        spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3079        if (!ibdev->sriov.is_going_down) {
3080                for (i = 0; i < ports; i++)
3081                        queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3082                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3083        } else {
3084                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3085                for (i = 0; i < ports; i++)
3086                        kfree(dm[i]);
3087        }
3088out:
3089        kfree(dm);
3090        return;
3091}
3092
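/*
 * Catastrophic error handling: walk every QP on the device and, for any send
 * or receive queue that still has outstanding work, put its CQ on a notify
 * list (at most once, via reset_notify_added) and then invoke each listed
 * completion handler so consumers can flush their outstanding requests.
 */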
3093static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3094{
3095        struct mlx4_ib_qp *mqp;
3096        unsigned long flags_qp;
3097        unsigned long flags_cq;
3098        struct mlx4_ib_cq *send_mcq, *recv_mcq;
3099        struct list_head    cq_notify_list;
3100        struct mlx4_cq *mcq;
3101        unsigned long flags;
3102
3103        pr_warn("mlx4_ib_handle_catas_error started\n");
3104        INIT_LIST_HEAD(&cq_notify_list);
3105
3106        /* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
3107        spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3108
3109        list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3110                spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3111                if (mqp->sq.tail != mqp->sq.head) {
3112                        send_mcq = to_mcq(mqp->ibqp.send_cq);
3113                        spin_lock_irqsave(&send_mcq->lock, flags_cq);
3114                        if (send_mcq->mcq.comp &&
3115                            mqp->ibqp.send_cq->comp_handler) {
3116                                if (!send_mcq->mcq.reset_notify_added) {
3117                                        send_mcq->mcq.reset_notify_added = 1;
3118                                        list_add_tail(&send_mcq->mcq.reset_notify,
3119                                                      &cq_notify_list);
3120                                }
3121                        }
3122                        spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3123                }
3124                spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3125                /* Now, handle the QP's receive queue */
3126                spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3127                /* no handling is needed for SRQ */
3128                if (!mqp->ibqp.srq) {
3129                        if (mqp->rq.tail != mqp->rq.head) {
3130                                recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3131                                spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3132                                if (recv_mcq->mcq.comp &&
3133                                    mqp->ibqp.recv_cq->comp_handler) {
3134                                        if (!recv_mcq->mcq.reset_notify_added) {
3135                                                recv_mcq->mcq.reset_notify_added = 1;
3136                                                list_add_tail(&recv_mcq->mcq.reset_notify,
3137                                                              &cq_notify_list);
3138                                        }
3139                                }
3140                                spin_unlock_irqrestore(&recv_mcq->lock,
3141                                                       flags_cq);
3142                        }
3143                }
3144                spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3145        }
3146
3147        list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3148                mcq->comp(mcq);
3149        }
3150        spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3151        pr_warn("mlx4_ib_handle_catas_error ended\n");
3152}
3153
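/*
 * When the ports are bonded they are reported as a single IB port 1.  Fold
 * the carrier state of the underlying netdevs into one port state (active if
 * any netdev is running with carrier) and dispatch PORT_ACTIVE or PORT_ERR
 * accordingly.
 */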
3154static void handle_bonded_port_state_event(struct work_struct *work)
3155{
3156        struct ib_event_work *ew =
3157                container_of(work, struct ib_event_work, work);
3158        struct mlx4_ib_dev *ibdev = ew->ib_dev;
3159        enum ib_port_state bonded_port_state = IB_PORT_NOP;
3160        int i;
3161        struct ib_event ibev;
3162
3163        kfree(ew);
3164        spin_lock_bh(&ibdev->iboe.lock);
3165        for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3166                struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3167                enum ib_port_state curr_port_state;
3168
3169                if (!curr_netdev)
3170                        continue;
3171
3172                curr_port_state =
3173                        (netif_running(curr_netdev) &&
3174                         netif_carrier_ok(curr_netdev)) ?
3175                        IB_PORT_ACTIVE : IB_PORT_DOWN;
3176
3177                bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3178                        curr_port_state : IB_PORT_ACTIVE;
3179        }
3180        spin_unlock_bh(&ibdev->iboe.lock);
3181
3182        ibev.device = &ibdev->ib_dev;
3183        ibev.element.port_num = 1;
3184        ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3185                IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3186
3187        ib_dispatch_event(&ibev);
3188}
3189
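/*
 * Refresh the cached SL-to-VL mapping for a port.  If the query fails, the
 * cache is set to all zeroes, which maps every SL to VL 0.
 */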
3190void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3191{
3192        u64 sl2vl;
3193        int err;
3194
3195        err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3196        if (err) {
3197                pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3198                       port, err);
3199                sl2vl = 0;
3200        }
3201        atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3202}
3203
3204static void ib_sl2vl_update_work(struct work_struct *work)
3205{
3206        struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3207        struct mlx4_ib_dev *mdev = ew->ib_dev;
3208        int port = ew->port;
3209
3210        mlx4_ib_sl2vl_update(mdev, port);
3211
3212        kfree(ew);
3213}
3214
3215void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3216                                     int port)
3217{
3218        struct ib_event_work *ew;
3219
3220        ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3221        if (ew) {
3222                INIT_WORK(&ew->work, ib_sl2vl_update_work);
3223                ew->port = port;
3224                ew->ib_dev = ibdev;
3225                queue_work(wq, &ew->work);
3226        }
3227}
3228
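/*
 * Translate mlx4 core events into IB events.  Port up/down on a bonded
 * device is deferred to the workqueue so the aggregated port state can be
 * computed; management-change EQEs and slave init/shutdown are handled in
 * their case blocks and return before the generic dispatch at the end.
 */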
3229static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3230                          enum mlx4_dev_event event, unsigned long param)
3231{
3232        struct ib_event ibev;
3233        struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3234        struct mlx4_eqe *eqe = NULL;
3235        struct ib_event_work *ew;
3236        int p = 0;
3237
3238        if (mlx4_is_bonded(dev) &&
3239            ((event == MLX4_DEV_EVENT_PORT_UP) ||
3240            (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3241                ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3242                if (!ew)
3243                        return;
3244                INIT_WORK(&ew->work, handle_bonded_port_state_event);
3245                ew->ib_dev = ibdev;
3246                queue_work(wq, &ew->work);
3247                return;
3248        }
3249
3250        if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3251                eqe = (struct mlx4_eqe *)param;
3252        else
3253                p = (int) param;
3254
3255        switch (event) {
3256        case MLX4_DEV_EVENT_PORT_UP:
3257                if (p > ibdev->num_ports)
3258                        return;
3259                if (!mlx4_is_slave(dev) &&
3260                    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3261                        IB_LINK_LAYER_INFINIBAND) {
3262                        if (mlx4_is_master(dev))
3263                                mlx4_ib_invalidate_all_guid_record(ibdev, p);
3264                        if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3265                            !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3266                                mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3267                }
3268                ibev.event = IB_EVENT_PORT_ACTIVE;
3269                break;
3270
3271        case MLX4_DEV_EVENT_PORT_DOWN:
3272                if (p > ibdev->num_ports)
3273                        return;
3274                ibev.event = IB_EVENT_PORT_ERR;
3275                break;
3276
3277        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3278                ibdev->ib_active = false;
3279                ibev.event = IB_EVENT_DEVICE_FATAL;
3280                mlx4_ib_handle_catas_error(ibdev);
3281                break;
3282
3283        case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3284                ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3285                if (!ew)
3286                        break;
3287
3288                INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3289                memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
3290                ew->ib_dev = ibdev;
3291                /* need to queue only for port owner, which uses GEN_EQE */
3292                if (mlx4_is_master(dev))
3293                        queue_work(wq, &ew->work);
3294                else
3295                        handle_port_mgmt_change_event(&ew->work);
3296                return;
3297
3298        case MLX4_DEV_EVENT_SLAVE_INIT:
3299                /* here, p is the slave id */
3300                do_slave_init(ibdev, p, 1);
3301                if (mlx4_is_master(dev)) {
3302                        int i;
3303
3304                        for (i = 1; i <= ibdev->num_ports; i++) {
3305                                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3306                                        == IB_LINK_LAYER_INFINIBAND)
3307                                        mlx4_ib_slave_alias_guid_event(ibdev,
3308                                                                       p, i,
3309                                                                       1);
3310                        }
3311                }
3312                return;
3313
3314        case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3315                if (mlx4_is_master(dev)) {
3316                        int i;
3317
3318                        for (i = 1; i <= ibdev->num_ports; i++) {
3319                                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3320                                        == IB_LINK_LAYER_INFINIBAND)
3321                                        mlx4_ib_slave_alias_guid_event(ibdev,
3322                                                                       p, i,
3323                                                                       0);
3324                        }
3325                }
3326                /* here, p is the slave id */
3327                do_slave_init(ibdev, p, 0);
3328                return;
3329
3330        default:
3331                return;
3332        }
3333
3334        ibev.device           = ibdev_ptr;
3335        ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3336
3337        ib_dispatch_event(&ibev);
3338}
3339
3340static struct mlx4_interface mlx4_ib_interface = {
3341        .add            = mlx4_ib_add,
3342        .remove         = mlx4_ib_remove,
3343        .event          = mlx4_ib_event,
3344        .protocol       = MLX4_PROT_IB_IPV6,
3345        .flags          = MLX4_INTFF_BONDING
3346};
3347
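/*
 * Module init: create the ordered workqueue used for deferred events, bring
 * up the multicast group (MCG) paravirtualization support, then register
 * with the mlx4 core so mlx4_ib_add() is called for each mlx4 device.
 */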
3348static int __init mlx4_ib_init(void)
3349{
3350        int err;
3351
3352        wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3353        if (!wq)
3354                return -ENOMEM;
3355
3356        err = mlx4_ib_mcg_init();
3357        if (err)
3358                goto clean_wq;
3359
3360        err = mlx4_register_interface(&mlx4_ib_interface);
3361        if (err)
3362                goto clean_mcg;
3363
3364        return 0;
3365
3366clean_mcg:
3367        mlx4_ib_mcg_destroy();
3368
3369clean_wq:
3370        destroy_workqueue(wq);
3371        return err;
3372}
3373
3374static void __exit mlx4_ib_cleanup(void)
3375{
3376        mlx4_unregister_interface(&mlx4_ib_interface);
3377        mlx4_ib_mcg_destroy();
3378        destroy_workqueue(wq);
3379}
3380
3381module_init(mlx4_ib_init);
3382module_exit(mlx4_ib_cleanup);
3383