linux/drivers/net/ethernet/mellanox/mlx4/fw.c
<<
>>
Prefs
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/etherdevice.h>
  36#include <linux/mlx4/cmd.h>
  37#include <linux/module.h>
  38#include <linux/cache.h>
  39#include <linux/kernel.h>
  40#include <uapi/rdma/mlx4-abi.h>
  41
  42#include "fw.h"
  43#include "icm.h"
  44
/* Firmware command-interface revisions this driver accepts.  Revision 3
 * (MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) introduced the newer per-port
 * command layout; revision 2 is the oldest still supported.
 */
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
  50
  51extern void __buggy_use_of_MLX4_GET(void);
  52extern void __buggy_use_of_MLX4_PUT(void);
  53
  54static bool enable_qos;
  55module_param(enable_qos, bool, 0444);
  56MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
  57
/*
 * MLX4_GET() - read a big-endian firmware-mailbox field into @dest.
 *
 * Reads sizeof(dest) bytes at byte offset @offset inside @source and
 * stores the value CPU-endian in @dest.  The 64-bit case goes through
 * get_unaligned() because mailbox offsets need not be 8-byte aligned.
 * Any other size resolves to a call to the deliberately-undeclared
 * __buggy_use_of_MLX4_GET(), turning a misuse into a link-time error.
 */
#define MLX4_GET(dest, source, offset)                                \
	do {                                                          \
		void *__p = (char *) (source) + (offset);             \
		__be64 val;                                           \
		switch (sizeof(dest)) {                               \
		case 1: (dest) = *(u8 *) __p;       break;            \
		case 2: (dest) = be16_to_cpup(__p); break;            \
		case 4: (dest) = be32_to_cpup(__p); break;            \
		case 8: val = get_unaligned((__be64 *)__p);           \
			(dest) = be64_to_cpu(val);  break;            \
		default: __buggy_use_of_MLX4_GET();                   \
		}                                                     \
	} while (0)
  71
/*
 * MLX4_PUT() - write @source big-endian into a firmware mailbox.
 *
 * Stores sizeof(source) bytes at byte offset @offset inside @dest,
 * converting from CPU endianness.  Sizes other than 1/2/4/8 become a
 * link error via __buggy_use_of_MLX4_PUT().
 * NOTE(review): unlike MLX4_GET, the 64-bit store does not use
 * put_unaligned() — presumably all 8-byte PUT offsets used in this file
 * are naturally aligned; confirm before adding new callers.
 */
#define MLX4_PUT(dest, source, offset)                                \
	do {                                                          \
		void *__d = ((char *) (dest) + (offset));             \
		switch (sizeof(source)) {                             \
		case 1: *(u8 *) __d = (source);                break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();                   \
		}                                                     \
	} while (0)
  83
  84static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
  85{
  86        static const char *fname[] = {
  87                [ 0] = "RC transport",
  88                [ 1] = "UC transport",
  89                [ 2] = "UD transport",
  90                [ 3] = "XRC transport",
  91                [ 6] = "SRQ support",
  92                [ 7] = "IPoIB checksum offload",
  93                [ 8] = "P_Key violation counter",
  94                [ 9] = "Q_Key violation counter",
  95                [12] = "Dual Port Different Protocol (DPDP) support",
  96                [15] = "Big LSO headers",
  97                [16] = "MW support",
  98                [17] = "APM support",
  99                [18] = "Atomic ops support",
 100                [19] = "Raw multicast support",
 101                [20] = "Address vector port checking support",
 102                [21] = "UD multicast support",
 103                [30] = "IBoE support",
 104                [32] = "Unicast loopback support",
 105                [34] = "FCS header control",
 106                [37] = "Wake On LAN (port1) support",
 107                [38] = "Wake On LAN (port2) support",
 108                [40] = "UDP RSS support",
 109                [41] = "Unicast VEP steering support",
 110                [42] = "Multicast VEP steering support",
 111                [48] = "Counters support",
 112                [52] = "RSS IP fragments support",
 113                [53] = "Port ETS Scheduler support",
 114                [55] = "Port link type sensing support",
 115                [59] = "Port management change event support",
 116                [61] = "64 byte EQE support",
 117                [62] = "64 byte CQE support",
 118        };
 119        int i;
 120
 121        mlx4_dbg(dev, "DEV_CAP flags:\n");
 122        for (i = 0; i < ARRAY_SIZE(fname); ++i)
 123                if (fname[i] && (flags & (1LL << i)))
 124                        mlx4_dbg(dev, "    %s\n", fname[i]);
 125}
 126
 127static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 128{
 129        static const char * const fname[] = {
 130                [0] = "RSS support",
 131                [1] = "RSS Toeplitz Hash Function support",
 132                [2] = "RSS XOR Hash Function support",
 133                [3] = "Device managed flow steering support",
 134                [4] = "Automatic MAC reassignment support",
 135                [5] = "Time stamping support",
 136                [6] = "VST (control vlan insertion/stripping) support",
 137                [7] = "FSM (MAC anti-spoofing) support",
 138                [8] = "Dynamic QP updates support",
 139                [9] = "Device managed flow steering IPoIB support",
 140                [10] = "TCP/IP offloads/flow-steering for VXLAN support",
 141                [11] = "MAD DEMUX (Secure-Host) support",
 142                [12] = "Large cache line (>64B) CQE stride support",
 143                [13] = "Large cache line (>64B) EQE stride support",
 144                [14] = "Ethernet protocol control support",
 145                [15] = "Ethernet Backplane autoneg support",
 146                [16] = "CONFIG DEV support",
 147                [17] = "Asymmetric EQs support",
 148                [18] = "More than 80 VFs support",
 149                [19] = "Performance optimized for limited rule configuration flow steering support",
 150                [20] = "Recoverable error events support",
 151                [21] = "Port Remap support",
 152                [22] = "QCN support",
 153                [23] = "QP rate limiting support",
 154                [24] = "Ethernet Flow control statistics support",
 155                [25] = "Granular QoS per VF support",
 156                [26] = "Port ETS Scheduler support",
 157                [27] = "Port beacon support",
 158                [28] = "RX-ALL support",
 159                [29] = "802.1ad offload support",
 160                [31] = "Modifying loopback source checks using UPDATE_QP support",
 161                [32] = "Loopback source checks support",
 162                [33] = "RoCEv2 support",
 163                [34] = "DMFS Sniffer support (UC & MC)",
 164                [35] = "Diag counters per port",
 165                [36] = "QinQ VST mode support",
 166                [37] = "sl to vl mapping table change event support",
 167                [38] = "user MAC support",
 168                [39] = "Report driver version to FW support",
 169        };
 170        int i;
 171
 172        for (i = 0; i < ARRAY_SIZE(fname); ++i)
 173                if (fname[i] && (flags & (1LL << i)))
 174                        mlx4_dbg(dev, "    %s\n", fname[i]);
 175}
 176
 177int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 178{
 179        struct mlx4_cmd_mailbox *mailbox;
 180        u32 *inbox;
 181        int err = 0;
 182
 183#define MOD_STAT_CFG_IN_SIZE            0x100
 184
 185#define MOD_STAT_CFG_PG_SZ_M_OFFSET     0x002
 186#define MOD_STAT_CFG_PG_SZ_OFFSET       0x003
 187
 188        mailbox = mlx4_alloc_cmd_mailbox(dev);
 189        if (IS_ERR(mailbox))
 190                return PTR_ERR(mailbox);
 191        inbox = mailbox->buf;
 192
 193        MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
 194        MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 195
 196        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
 197                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 198
 199        mlx4_free_cmd_mailbox(dev, mailbox);
 200        return err;
 201}
 202
/*
 * mlx4_QUERY_FUNC() - query per-function (PF/VF) info from firmware.
 * @dev:   mlx4 device
 * @func:  filled with the decoded QUERY_FUNC output on success
 * @slave: function index, passed as the command's in_modifier
 *
 * Runs the QUERY_FUNC command natively and unpacks bus/device/function
 * location plus EQ and UAR reservation counts from the output mailbox.
 * Returns 0 or a negative errno.
 */
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

/* Byte offsets of fields within the QUERY_FUNC output mailbox. */
#define QUERY_FUNC_BUS_OFFSET                   0x00
#define QUERY_FUNC_DEVICE_OFFSET                0x01
#define QUERY_FUNC_FUNCTION_OFFSET              0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET     0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET              0x04
#define QUERY_FUNC_MAX_EQ_OFFSET                0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET             0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	/* The in_modifier selects which function the firmware reports on. */
	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	/* NOTE(review): the masks below (0xf, 0xf1, 0x7, 0x0f) encode the
	 * firmware's bit layout — DEVICE and FUNCTION share byte 0x01 —
	 * confirm against the mlx4 PRM before changing any of them.
	 */
	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
 257
 258static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
 259{
 260        struct mlx4_vport_oper_state *vp_oper;
 261        struct mlx4_vport_state *vp_admin;
 262        int err;
 263
 264        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 265        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 266
 267        if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
 268                err = __mlx4_register_vlan(&priv->dev, port,
 269                                           vp_admin->default_vlan,
 270                                           &vp_oper->vlan_idx);
 271                if (err) {
 272                        vp_oper->vlan_idx = NO_INDX;
 273                        mlx4_warn(&priv->dev,
 274                                  "No vlan resources slave %d, port %d\n",
 275                                  slave, port);
 276                        return err;
 277                }
 278                mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
 279                         (int)(vp_oper->state.default_vlan),
 280                         vp_oper->vlan_idx, slave, port);
 281        }
 282        vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
 283        vp_oper->state.default_vlan = vp_admin->default_vlan;
 284        vp_oper->state.default_qos  = vp_admin->default_qos;
 285
 286        return 0;
 287}
 288
 289static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
 290{
 291        struct mlx4_vport_oper_state *vp_oper;
 292        struct mlx4_slave_state *slave_state;
 293        struct mlx4_vport_state *vp_admin;
 294        int err;
 295
 296        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 297        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 298        slave_state = &priv->mfunc.master.slave_state[slave];
 299
 300        if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
 301            (!slave_state->active))
 302                return 0;
 303
 304        if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
 305            vp_oper->state.default_vlan == vp_admin->default_vlan &&
 306            vp_oper->state.default_qos == vp_admin->default_qos)
 307                return 0;
 308
 309        if (!slave_state->vst_qinq_supported) {
 310                /* Warn and revert the request to set vst QinQ mode */
 311                vp_admin->vlan_proto   = vp_oper->state.vlan_proto;
 312                vp_admin->default_vlan = vp_oper->state.default_vlan;
 313                vp_admin->default_qos  = vp_oper->state.default_qos;
 314
 315                mlx4_warn(&priv->dev,
 316                          "Slave %d does not support VST QinQ mode\n", slave);
 317                return 0;
 318        }
 319
 320        err = mlx4_activate_vst_qinq(priv, slave, port);
 321        return err;
 322}
 323
/*
 * mlx4_QUERY_FUNC_CAP_wrapper() - PF-side handler for a slave's
 * QUERY_FUNC_CAP command.
 * @dev:    mlx4 device (master)
 * @slave:  index of the VF issuing the command
 * @vhcr:   virtual HCR; op_modifier selects the query flavour and
 *          in_modifier carries either the port (op_modifier 1) or the
 *          slave's capability-request bits (op_modifier 0)
 * @inbox:  unused command input mailbox
 * @outbox: output mailbox filled with the granted capabilities
 * @cmd:    command metadata (unused here)
 *
 * op_modifier == 1 reports per-port information (proxy/tunnel special
 * QPs, qkey, per-port flags); op_modifier == 0 reports general function
 * capabilities (resource quotas, EQ limits, extra flags).  Returns 0 or
 * -EINVAL on a bad port or op_modifier.
 */
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size, proxy_qp, qkey;
	int	err = 0;
	struct mlx4_func func;

/* Output mailbox layout when opcode modifier = 0 (general caps).
 * NOTE(review): all offsets mirror the firmware interface — confirm
 * against the mlx4 PRM before changing.
 */
#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

/* Newer (quota-aware) locations of the per-resource quotas. */
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

/* Bits reported in QUERY_FUNC_CAP_FLAGS_OFFSET. */
#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO			0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_PHV_BIT			0x40
#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE	0x20

/* Capability-request bits a slave may set in in_modifier (op_mod 0). */
#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ	BIT(30)
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);
		struct mlx4_vport_oper_state *vp_oper;

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		/* Special QPs come in groups of 8 per slave; port is
		 * 1-based, hence the "- 1".
		 */
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		/* Only expose the QP0 qkey when SMI is enabled for this
		 * VF/port and a paravirtualized qkey exists.
		 */
		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		/* QP1 tunnel/proxy sit two QPs above their QP0 peers. */
		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

		/* Reconcile any pending VST QinQ request before reporting
		 * the vlan-offload flags derived from vp_oper below.
		 */
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		err = mlx4_handle_vst_qinq(priv, slave, port);
		if (err)
			return err;

		field = 0;
		if (dev->caps.phv_bit[port])
			field |= QUERY_FUNC_CAP_PHV_BIT;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		struct mlx4_slave_state *slave_state =
			&priv->mfunc.master.slave_state[slave];

		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		/* Number of ports visible to this slave. */
		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		/* Each quota is written twice: the per-slave quota at the
		 * new location and the global count at the deprecated one.
		 */
		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		/* Without SYS_EQs (or if QUERY_FUNC fails) fall back to the
		 * device-wide EQ numbers; otherwise report this function's
		 * own limits.  Slaves that did not advertise support for
		 * non-power-of-2 EQ counts get a rounded-down value.
		 */
		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		/* Encode the slave index into bits 15:8 of the lkey. */
		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);

		/* Remember whether this slave can do VST QinQ. */
		if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
			slave_state->vst_qinq_supported = true;

	} else
		err = -EINVAL;

	return err;
}
 537
 538int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 539                        struct mlx4_func_cap *func_cap)
 540{
 541        struct mlx4_cmd_mailbox *mailbox;
 542        u32                     *outbox;
 543        u8                      field, op_modifier;
 544        u32                     size, qkey;
 545        int                     err = 0, quotas = 0;
 546        u32                     in_modifier;
 547        u32                     slave_caps;
 548
 549        op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
 550        slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
 551                QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
 552        in_modifier = op_modifier ? gen_or_port : slave_caps;
 553
 554        mailbox = mlx4_alloc_cmd_mailbox(dev);
 555        if (IS_ERR(mailbox))
 556                return PTR_ERR(mailbox);
 557
 558        err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
 559                           MLX4_CMD_QUERY_FUNC_CAP,
 560                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 561        if (err)
 562                goto out;
 563
 564        outbox = mailbox->buf;
 565
 566        if (!op_modifier) {
 567                MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
 568                if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
 569                        mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
 570                        err = -EPROTONOSUPPORT;
 571                        goto out;
 572                }
 573                func_cap->flags = field;
 574                quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
 575
 576                MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 577                func_cap->num_ports = field;
 578
 579                MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
 580                func_cap->pf_context_behaviour = size;
 581
 582                if (quotas) {
 583                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
 584                        func_cap->qp_quota = size & 0xFFFFFF;
 585
 586                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
 587                        func_cap->srq_quota = size & 0xFFFFFF;
 588
 589                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
 590                        func_cap->cq_quota = size & 0xFFFFFF;
 591
 592                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
 593                        func_cap->mpt_quota = size & 0xFFFFFF;
 594
 595                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
 596                        func_cap->mtt_quota = size & 0xFFFFFF;
 597
 598                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
 599                        func_cap->mcg_quota = size & 0xFFFFFF;
 600
 601                } else {
 602                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
 603                        func_cap->qp_quota = size & 0xFFFFFF;
 604
 605                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
 606                        func_cap->srq_quota = size & 0xFFFFFF;
 607
 608                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
 609                        func_cap->cq_quota = size & 0xFFFFFF;
 610
 611                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
 612                        func_cap->mpt_quota = size & 0xFFFFFF;
 613
 614                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
 615                        func_cap->mtt_quota = size & 0xFFFFFF;
 616
 617                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
 618                        func_cap->mcg_quota = size & 0xFFFFFF;
 619                }
 620                MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
 621                func_cap->max_eq = size & 0xFFFFFF;
 622
 623                MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 624                func_cap->reserved_eq = size & 0xFFFFFF;
 625
 626                if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
 627                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
 628                        func_cap->reserved_lkey = size;
 629                } else {
 630                        func_cap->reserved_lkey = 0;
 631                }
 632
 633                func_cap->extra_flags = 0;
 634
 635                /* Mailbox data from 0x6c and onward should only be treated if
 636                 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
 637                 */
 638                if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
 639                        MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
 640                        if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
 641                                func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
 642                        if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
 643                                func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
 644                }
 645
 646                goto out;
 647        }
 648
 649        /* logical port query */
 650        if (gen_or_port > dev->caps.num_ports) {
 651                err = -EINVAL;
 652                goto out;
 653        }
 654
 655        MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
 656        if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
 657                if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
 658                        mlx4_err(dev, "VLAN is enforced on this port\n");
 659                        err = -EPROTONOSUPPORT;
 660                        goto out;
 661                }
 662
 663                if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
 664                        mlx4_err(dev, "Force mac is enabled on this port\n");
 665                        err = -EPROTONOSUPPORT;
 666                        goto out;
 667                }
 668        } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
 669                MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
 670                if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
 671                        mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
 672                        err = -EPROTONOSUPPORT;
 673                        goto out;
 674                }
 675        }
 676
 677        MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
 678        func_cap->physical_port = field;
 679        if (func_cap->physical_port != gen_or_port) {
 680                err = -EINVAL;
 681                goto out;
 682        }
 683
 684        if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
 685                MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
 686                func_cap->spec_qps.qp0_qkey = qkey;
 687        } else {
 688                func_cap->spec_qps.qp0_qkey = 0;
 689        }
 690
 691        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
 692        func_cap->spec_qps.qp0_tunnel = size & 0xFFFFFF;
 693
 694        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
 695        func_cap->spec_qps.qp0_proxy = size & 0xFFFFFF;
 696
 697        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
 698        func_cap->spec_qps.qp1_tunnel = size & 0xFFFFFF;
 699
 700        MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
 701        func_cap->spec_qps.qp1_proxy = size & 0xFFFFFF;
 702
 703        if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
 704                MLX4_GET(func_cap->phys_port_id, outbox,
 705                         QUERY_FUNC_CAP_PHYS_PORT_ID);
 706
 707        MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
 708
 709        /* All other resources are allocated by the master, but we still report
 710         * 'num' and 'reserved' capabilities as follows:
 711         * - num remains the maximum resource index
 712         * - 'num - reserved' is the total available objects of a resource, but
 713         *   resource indices may be less than 'reserved'
 714         * TODO: set per-resource quotas */
 715
 716out:
 717        mlx4_free_cmd_mailbox(dev, mailbox);
 718
 719        return err;
 720}
 721
 722static void disable_unsupported_roce_caps(void *buf);
 723
/*
 * mlx4_QUERY_DEV_CAP - read device-wide capabilities from firmware.
 * @dev:     mlx4 device context
 * @dev_cap: output; filled with the parsed capability limits and flags
 *
 * Issues the QUERY_DEV_CAP firmware command into a DMA mailbox and decodes
 * the response field-by-field (via MLX4_GET at the PRM-defined byte offsets
 * below) into @dev_cap.  Also queries per-port capabilities for every port
 * reported by the device.  Returns 0 on success or a negative errno from
 * the command/mailbox layer.
 */
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

/* Byte offsets of fields within the QUERY_DEV_CAP output mailbox. */
#define QUERY_DEV_CAP_OUT_SIZE		       0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET	0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_WOL_OFFSET		0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_USER_MAC_EN_OFFSET	0x5C
#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET	0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET	0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT	0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET	0xe4

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	/* Execute QUERY_DEV_CAP; firmware fills the output mailbox. */
	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	/* On multi-function devices, mask out RoCE capability bits that are
	 * not supported there (see disable_unsupported_roce_caps()).
	 */
	if (mlx4_is_mfunc(dev))
		disable_unsupported_roce_caps(outbox);
	/* Resource limits and reserved counts; most fields are log2 values,
	 * hence the "1 << (field & mask)" decodes below.
	 */
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	/* 0 means GSO is not supported, not a 1-byte max */
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	/* low nibble: log2 of the RSS indirection table size; 0 => no RSS */
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	/* Flow-steering related capability bits */
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	/* Device flags are a 64-bit value split over two 32-bit words */
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
	dev_cap->wol_port[1] = !!(field & 0x20);
	dev_cap->wol_port[2] = !!(field & 0x40);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	/* BlueFlame (low-latency doorbell) parameters; bit 7 gates support */
	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		/* clamp regs/page so one page of BF registers fits in PAGE_SIZE */
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_USER_MAC_EN_OFFSET);
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	/* Context entry sizes (bytes) used later for ICM table layout */
	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 4))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
	if (field & 0x40)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;

	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	if (field32 & (1 << 8))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
	if (field32 & (1 << 17))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1<<6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1<<3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	/* max_counters is only meaningful when the device reports counters */
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	/* QP rate-limiting caps; a zero rate count means not supported */
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val  = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val  = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}

	MLX4_GET(dev_cap->health_buffer_addrs, outbox,
		 QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	/* Per-port capabilities; ports are numbered starting at 1 */
	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
1123
1124void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
1125{
1126        if (dev_cap->bf_reg_size > 0)
1127                mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
1128                         dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
1129        else
1130                mlx4_dbg(dev, "BlueFlame not available\n");
1131
1132        mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
1133                 dev_cap->bmme_flags, dev_cap->reserved_lkey);
1134        mlx4_dbg(dev, "Max ICM size %lld MB\n",
1135                 (unsigned long long) dev_cap->max_icm_sz >> 20);
1136        mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1137                 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
1138        mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1139                 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
1140        mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1141                 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
1142        mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
1143                 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
1144                 dev_cap->eqc_entry_sz);
1145        mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
1146                 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
1147        mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1148                 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
1149        mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
1150                 dev_cap->max_pds, dev_cap->reserved_mgms);
1151        mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1152                 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
1153        mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
1154                 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
1155                 dev_cap->port_cap[1].max_port_width);
1156        mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
1157                 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
1158        mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
1159                 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
1160        mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
1161        mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
1162        mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
1163        mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
1164                 dev_cap->dmfs_high_rate_qpn_base);
1165        mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
1166                 dev_cap->dmfs_high_rate_qpn_range);
1167
1168        if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
1169                struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;
1170
1171                mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
1172                         rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
1173                         rl_caps->min_unit, rl_caps->min_val);
1174        }
1175
1176        dump_dev_cap_flags(dev, dev_cap->flags);
1177        dump_dev_cap_flags2(dev, dev_cap->flags2);
1178}
1179
/*
 * mlx4_QUERY_PORT - read per-port capabilities from firmware into @port_cap.
 * @dev:      mlx4 device context
 * @port:     1-based port number to query
 * @port_cap: output; filled with the decoded port capabilities
 *
 * Two firmware paths: devices flagged MLX4_FLAG_OLD_PORT_CMDS lack the
 * QUERY_PORT command, so port attributes are extracted from QUERY_DEV_CAP
 * instead; newer devices use QUERY_PORT with its own mailbox layout.
 * Returns 0 on success or a negative errno from the command/mailbox layer.
 */
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		/* Legacy path: reuse the QUERY_DEV_CAP output fields */
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu	   = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
	} else {
/* Byte offsets within the QUERY_PORT output mailbox. */
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;

		/* Byte 0 packs several single-bit attributes plus the
		 * supported port type in the low two bits.
		 */
		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->link_state = (field & 0x80) >> 7;
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		/* GID count in the high nibble, P_Key count in the low; both log2 */
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids	   = 1 << (field >> 4);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl	   = field & 0xf;
		port_cap->max_tc_eth	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs	= field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		/* 0x18 word: transceiver type in the top byte, vendor OUI below */
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
1260
1261#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28)
1262#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
1263#define DEV_CAP_EXT_2_FLAG_80_VFS       (1 << 21)
1264#define DEV_CAP_EXT_2_FLAG_FSM          (1 << 20)
1265
/*
 * Slave (VF) wrapper for QUERY_DEV_CAP: run the command natively on the
 * guest's behalf, then rewrite the output mailbox so the guest only sees
 * capabilities it is allowed to use (no MW type 1, no timestamping, no
 * VXLAN offload, no Blueflame, no host-side virt features, etc.).
 *
 * Returns 0 on success or the error code from the underlying command.
 */
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
                               struct mlx4_vhcr *vhcr,
                               struct mlx4_cmd_mailbox *inbox,
                               struct mlx4_cmd_mailbox *outbox,
                               struct mlx4_cmd_info *cmd)
{
        u64     flags;
        int     err = 0;
        u8      field;
        u16     field16;
        u32     bmme_flags, field32;
        int     real_port;
        int     slave_port;
        int     first_port;
        struct mlx4_active_ports actv_ports;

        err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                return err;

        disable_unsupported_roce_caps(outbox->buf);
        /* add port mng change event capability and disable mw type 1
         * unconditionally to slaves
         */
        MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
        flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
        /* Remap the per-port WoL capability bits from the slave's physical
         * ports onto its logical (0-based, contiguous) port numbering, and
         * clear the WoL bits of any ports the slave does not own.
         */
        actv_ports = mlx4_get_active_ports(dev, slave);
        first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
        for (slave_port = 0, real_port = first_port;
             real_port < first_port +
             bitmap_weight(actv_ports.ports, dev->caps.num_ports);
             ++real_port, ++slave_port) {
                if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
                        flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
                else
                        flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
        }
        for (; slave_port < dev->caps.num_ports; ++slave_port)
                flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);

        /* Not exposing RSS IP fragments to guests */
        flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
        MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

        /* Report only the number of ports this slave actually owns in the
         * low nibble of the VL/port field.
         */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
        field &= ~0x0F;
        field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

        /* For guests, disable timestamp (clear bit 7) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

        /* For guests, disable vxlan tunneling and QoS support
         * (clear bits 3 and 5)
         */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
        field &= 0xd7;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

        /* For guests, disable port BEACON (clear bit 7) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);

        /* For guests, report Blueflame disabled (clear bit 7) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

        /* For guests, disable mw type 2 and port remap */
        MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
        bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
        bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
        MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

        /* turn off device-managed steering capability if not enabled */
        if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
                MLX4_GET(field, outbox->buf,
                         QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
                field &= 0x7f;
                MLX4_PUT(outbox->buf, field,
                         QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
        }

        /* turn off ipoib managed steering for guests (clear bit 7) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
        field &= ~0x80;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

        /* turn off host side virt features (VST, FSM, etc) for guests */
        MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
        field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
                     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
        MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

        /* turn off QCN for guests (clear bit 0) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
        field &= 0xfe;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

        /* turn off QP max-rate limiting for guests (report zero limiters) */
        field16 = 0;
        MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);

        /* turn off QoS per VF support for guests (clear bit 4) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
        field &= 0xef;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);

        /* turn off ignore FCS feature for guests (clear bit 2) */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
        field &= 0xfb;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);

        return 0;
}
1384
/* Strip RoCE-related capability bits a guest must not see: bit 31 of the
 * extended flags word, bit 24 of the ext-2 flags word, and the RoCE v1/v2
 * flag in the BMME flags word.  Operates directly on a QUERY_DEV_CAP
 * output mailbox buffer.
 */
static void disable_unsupported_roce_caps(void *buf)
{
        u32 flags;

        MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        flags &= ~(1UL << 31);
        MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
        flags &= ~(1UL << 24);
        MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
        MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
        flags &= ~(MLX4_FLAG_ROCE_V1_V2);
        MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
}
1399
/*
 * Slave (VF) wrapper for QUERY_PORT (general query only).  Validates and
 * translates the guest's port number, runs the command natively, and then
 * rewrites fields a guest must not see raw: the default MAC is replaced by
 * the per-VF MAC stored in vf_oper state, link sensing is masked off, the
 * link-up bit reflects the VF admin link state, and the current GID/pkey
 * table sizes are replaced with the slave's share.
 */
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 def_mac;
        u8 port_type;
        u16 short_field;
        int err;
        int admin_link_state;
        int port = mlx4_slave_convert_port(dev, slave,
                                           vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_LINK_UP_MASK          0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET  0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET   0x0e

        if (port < 0)
                return -EINVAL;

        /* Protect against untrusted guests: enforce that this is the
         * QUERY_PORT general query.
         */
        if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
                return -EINVAL;

        vhcr->in_modifier = port;

        err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_NATIVE);

        /* Only rewrite the output for real slaves; the PF (function ==
         * slave) sees the raw data.
         */
        if (!err && dev->caps.function != slave) {
                def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
                MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

                /* get port type - currently only eth is enabled */
                MLX4_GET(port_type, outbox->buf,
                         QUERY_PORT_SUPPORTED_TYPE_OFFSET);

                /* No link sensing allowed */
                port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
                /* set port type to currently operating port type */
                port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

                admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
                if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
                        port_type |= MLX4_PORT_LINK_UP_MASK;
                else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
                        port_type &= ~MLX4_PORT_LINK_UP_MASK;
                else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
                        /* In bonded mode "auto" mirrors the other port's
                         * link state.
                         */
                        int other_port = (port == 1) ? 2 : 1;
                        struct mlx4_port_cap port_cap;

                        err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
                        if (err)
                                goto out;
                        port_type |= (port_cap.link_state << 7);
                }

                MLX4_PUT(outbox->buf, port_type,
                         QUERY_PORT_SUPPORTED_TYPE_OFFSET);

                if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
                        short_field = mlx4_get_slave_num_gids(dev, slave, port);
                else
                        short_field = 1; /* slave max gids */
                MLX4_PUT(outbox->buf, short_field,
                         QUERY_PORT_CUR_MAX_GID_OFFSET);

                short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
                MLX4_PUT(outbox->buf, short_field,
                         QUERY_PORT_CUR_MAX_PKEY_OFFSET);
        }
out:
        return err;
}
1480
1481int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
1482                                    int *gid_tbl_len, int *pkey_tbl_len)
1483{
1484        struct mlx4_cmd_mailbox *mailbox;
1485        u32                     *outbox;
1486        u16                     field;
1487        int                     err;
1488
1489        mailbox = mlx4_alloc_cmd_mailbox(dev);
1490        if (IS_ERR(mailbox))
1491                return PTR_ERR(mailbox);
1492
1493        err =  mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
1494                            MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1495                            MLX4_CMD_WRAPPED);
1496        if (err)
1497                goto out;
1498
1499        outbox = mailbox->buf;
1500
1501        MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
1502        *gid_tbl_len = field;
1503
1504        MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1505        *pkey_tbl_len = field;
1506
1507out:
1508        mlx4_free_cmd_mailbox(dev, mailbox);
1509        return err;
1510}
1511EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
1512
/*
 * Post virtual -> physical mappings of ICM memory to firmware using the
 * given command (@op is MAP_FA, MAP_ICM or MAP_ICM_AUX).  Each mailbox
 * entry is a pair of big-endian 64-bit words: the virtual address (only
 * written when @virt != -1) and the physical address with the log2 page
 * size encoded in its low bits.  Entries are flushed to firmware in
 * batches of MLX4_MAILBOX_SIZE / 16.
 *
 * @virt == -1 means no virtual addresses are supplied (used by MAP_FA).
 * Returns 0 on success or a negative errno.
 */
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_icm_iter iter;
        __be64 *pages;
        int lg;
        int nent = 0;           /* entries accumulated in the mailbox */
        int i;
        int err = 0;
        int ts = 0, tc = 0;     /* total KB / total page count, for dbg */

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        pages = mailbox->buf;

        for (mlx4_icm_first(icm, &iter);
             !mlx4_icm_last(&iter);
             mlx4_icm_next(&iter)) {
                /*
                 * We have to pass pages that are aligned to their
                 * size, so find the least significant 1 in the
                 * address or size and use that as our log2 size.
                 */
                lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
                if (lg < MLX4_ICM_PAGE_SHIFT) {
                        mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
                                  MLX4_ICM_PAGE_SIZE,
                                  (unsigned long long) mlx4_icm_addr(&iter),
                                  mlx4_icm_size(&iter));
                        err = -EINVAL;
                        goto out;
                }

                /* Split the chunk into 2^lg-byte pieces, one entry each */
                for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
                        if (virt != -1) {
                                pages[nent * 2] = cpu_to_be64(virt);
                                virt += 1ULL << lg;
                        }

                        /* physical address with log2 page size (relative
                         * to the ICM page size) in the low bits
                         */
                        pages[nent * 2 + 1] =
                                cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
                                            (lg - MLX4_ICM_PAGE_SHIFT));
                        ts += 1 << (lg - 10);   /* accumulate size in KB */
                        ++tc;

                        /* Mailbox full (16 bytes per entry): flush to FW */
                        if (++nent == MLX4_MAILBOX_SIZE / 16) {
                                err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
                                                MLX4_CMD_TIME_CLASS_B,
                                                MLX4_CMD_NATIVE);
                                if (err)
                                        goto out;
                                nent = 0;
                        }
                }
        }

        /* Flush any remaining entries */
        if (nent)
                err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
                               MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
        if (err)
                goto out;

        switch (op) {
        case MLX4_CMD_MAP_FA:
                mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
                break;
        case MLX4_CMD_MAP_ICM_AUX:
                mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
                break;
        case MLX4_CMD_MAP_ICM:
                mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
                         tc, ts, (unsigned long long) virt - (ts << 10));
                break;
        }

out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
1593
/* Map the firmware area: MAP_FA supplies no virtual addresses (virt == -1) */
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}
1598
/* Unmap the firmware area previously mapped with mlx4_MAP_FA() */
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
1604
1605
/* Issue the RUN_FW command to start firmware execution */
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
1611
/*
 * Query firmware: read the FW version, command interface revision and
 * the location/size of the catastrophic-error buffer, clear-interrupt
 * register, communication channel and internal clock, caching the
 * results in mlx4_priv(dev)->fw and ->cmd.  Slaves only need the
 * version and PPF id and return early.
 *
 * Returns 0 on success, -ENODEV if the command interface revision is
 * outside the supported range, or the error from the command itself.
 */
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
        struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_mailbox *mailbox;
        u32 *outbox;
        int err = 0;
        u64 fw_ver;
        u16 cmd_if_rev;
        u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID                0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET          0x50
#define QUERY_FW_CLOCK_BAR             0x58

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;

        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;

        MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
        /*
         * FW subminor version is at more significant bits than minor
         * version, so swap here.
         */
        dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
                ((fw_ver & 0xffff0000ull) >> 16) |
                ((fw_ver & 0x0000ffffull) << 16);

        /* Store the PPF id byte as our function number */
        MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
        dev->caps.function = lg;

        /* Slaves need only the version and function id read above */
        if (mlx4_is_slave(dev))
                goto out;


        MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
                mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
                         cmd_if_rev);
                mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
                         (int) (dev->caps.fw_ver >> 32),
                         (int) (dev->caps.fw_ver >> 16) & 0xffff,
                         (int) dev->caps.fw_ver & 0xffff);
                mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
                         MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
                err = -ENODEV;
                goto out;
        }

        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
                dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

        /* The field holds log2 of the maximum outstanding commands */
        MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
        cmd->max_cmds = 1 << lg;

        mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
                 (int) (dev->caps.fw_ver >> 32),
                 (int) (dev->caps.fw_ver >> 16) & 0xffff,
                 (int) dev->caps.fw_ver & 0xffff,
                 cmd_if_rev, cmd->max_cmds);

        MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
        MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
        MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
        /* decode the encoded BAR field into a BAR number */
        fw->catas_bar = (fw->catas_bar >> 6) * 2;

        mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
                 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

        MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
        MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
        MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
        fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

        MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
        MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
        fw->comm_bar = (fw->comm_bar >> 6) * 2;
        mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
                 fw->comm_bar, fw->comm_base);
        mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

        MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
        MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
        fw->clock_bar = (fw->clock_bar >> 6) * 2;
        mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
                 fw->clock_bar, fw->clock_offset);

        /*
         * Round up number of system pages needed in case
         * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
         */
        fw->fw_pages =
                ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
                (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

        mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
                 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
1736
1737int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1738                          struct mlx4_vhcr *vhcr,
1739                          struct mlx4_cmd_mailbox *inbox,
1740                          struct mlx4_cmd_mailbox *outbox,
1741                          struct mlx4_cmd_info *cmd)
1742{
1743        u8 *outbuf;
1744        int err;
1745
1746        outbuf = outbox->buf;
1747        err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1748                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1749        if (err)
1750                return err;
1751
1752        /* for slaves, set pci PPF ID to invalid and zero out everything
1753         * else except FW version */
1754        outbuf[0] = outbuf[1] = 0;
1755        memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1756        outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1757
1758        return 0;
1759}
1760
/*
 * Extract the adapter board ID string from the vendor-specific data
 * (VSD) section of the QUERY_ADAPTER output into @board_id.  The buffer
 * is MLX4_BOARD_ID_LEN bytes and is always NUL-terminated (it is zeroed
 * first and never fully overwritten).
 *
 * Two layouts exist: boards carrying the Topspin signature store a plain
 * string, everything else stores a Mellanox board id whose 4-byte words
 * were byte-swapped by firmware and must be swabbed back.
 */
static void get_board_id(void *vsd, char *board_id)
{
        int i;

#define VSD_OFFSET_SIG1         0x00
#define VSD_OFFSET_SIG2         0xde
#define VSD_OFFSET_MLX_BOARD_ID 0xd0
#define VSD_OFFSET_TS_BOARD_ID  0x20

#define VSD_SIGNATURE_TOPSPIN   0x5ad

        memset(board_id, 0, MLX4_BOARD_ID_LEN);

        if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
            be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
                strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
        } else {
                /*
                 * The board ID is a string but the firmware byte
                 * swaps each 4-byte word before passing it back to
                 * us.  Therefore we need to swab it before printing.
                 */
                u32 *bid_u32 = (u32 *)board_id;

                for (i = 0; i < 4; ++i) {
                        u32 *addr;
                        u32 val;

                        /* Neither the VSD nor board_id is guaranteed to be
                         * 4-byte aligned, so use the unaligned accessors
                         * for both the load and the store.
                         */
                        addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
                        val = get_unaligned(addr);
                        val = swab32(val);
                        put_unaligned(val, &bid_u32[i]);
                }
        }
}
1796
/*
 * Query adapter-specific attributes: the INTA pin and the board id
 * (parsed out of the vendor-specific data by get_board_id()).
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
        struct mlx4_cmd_mailbox *mailbox;
        u32 *outbox;
        int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;

        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;

        MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);

        /* outbox is u32 *, so scale the byte offset down by 4 */
        get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
                     adapter->board_id);

out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
1826
1827int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1828{
1829        struct mlx4_cmd_mailbox *mailbox;
1830        __be32 *inbox;
1831        int err;
1832        static const u8 a0_dmfs_hw_steering[] =  {
1833                [MLX4_STEERING_DMFS_A0_DEFAULT]         = 0,
1834                [MLX4_STEERING_DMFS_A0_DYNAMIC]         = 1,
1835                [MLX4_STEERING_DMFS_A0_STATIC]          = 2,
1836                [MLX4_STEERING_DMFS_A0_DISABLE]         = 3
1837        };
1838
1839#define INIT_HCA_IN_SIZE                 0x200
1840#define INIT_HCA_VERSION_OFFSET          0x000
1841#define  INIT_HCA_VERSION                2
1842#define INIT_HCA_VXLAN_OFFSET            0x0c
1843#define INIT_HCA_CACHELINE_SZ_OFFSET     0x0e
1844#define INIT_HCA_FLAGS_OFFSET            0x014
1845#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1846#define INIT_HCA_QPC_OFFSET              0x020
1847#define  INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
1848#define  INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
1849#define  INIT_HCA_SRQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x28)
1850#define  INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
1851#define  INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
1852#define  INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
1853#define  INIT_HCA_EQE_CQE_OFFSETS        (INIT_HCA_QPC_OFFSET + 0x38)
1854#define  INIT_HCA_EQE_CQE_STRIDE_OFFSET  (INIT_HCA_QPC_OFFSET + 0x3b)
1855#define  INIT_HCA_ALTC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
1856#define  INIT_HCA_AUXC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
1857#define  INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
1858#define  INIT_HCA_LOG_EQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x67)
1859#define INIT_HCA_NUM_SYS_EQS_OFFSET     (INIT_HCA_QPC_OFFSET + 0x6a)
1860#define  INIT_HCA_RDMARC_BASE_OFFSET     (INIT_HCA_QPC_OFFSET + 0x70)
1861#define  INIT_HCA_LOG_RD_OFFSET          (INIT_HCA_QPC_OFFSET + 0x77)
1862#define INIT_HCA_MCAST_OFFSET            0x0c0
1863#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
1864#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1865#define  INIT_HCA_LOG_MC_HASH_SZ_OFFSET  (INIT_HCA_MCAST_OFFSET + 0x16)
1866#define  INIT_HCA_UC_STEERING_OFFSET     (INIT_HCA_MCAST_OFFSET + 0x18)
1867#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1868#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN       0x6
1869#define  INIT_HCA_DRIVER_VERSION_OFFSET   0x140
1870#define  INIT_HCA_DRIVER_VERSION_SZ       0x40
1871#define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
1872#define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1873#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1874#define  INIT_HCA_FS_A0_OFFSET            (INIT_HCA_FS_PARAM_OFFSET + 0x18)
1875#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1876#define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1877#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1878#define  INIT_HCA_FS_IB_BITS_OFFSET       (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1879#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1880#define INIT_HCA_TPT_OFFSET              0x0f0
1881#define  INIT_HCA_DMPT_BASE_OFFSET       (INIT_HCA_TPT_OFFSET + 0x00)
1882#define  INIT_HCA_TPT_MW_OFFSET          (INIT_HCA_TPT_OFFSET + 0x08)
1883#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
1884#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
1885#define  INIT_HCA_CMPT_BASE_OFFSET       (INIT_HCA_TPT_OFFSET + 0x18)
1886#define INIT_HCA_UAR_OFFSET              0x120
1887#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
1888#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
1889
1890        mailbox = mlx4_alloc_cmd_mailbox(dev);
1891        if (IS_ERR(mailbox))
1892                return PTR_ERR(mailbox);
1893        inbox = mailbox->buf;
1894
1895        *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1896
1897        *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1898                ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
1899
1900#if defined(__LITTLE_ENDIAN)
1901        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1902#elif defined(__BIG_ENDIAN)
1903        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1904#else
1905#error Host endianness not defined
1906#endif
1907        /* Check port for UD address vector: */
1908        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1909
1910        /* Enable IPoIB checksumming if we can: */
1911        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1912                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1913
1914        /* Enable QoS support if module parameter set */
1915        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
1916                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1917
1918        /* enable counters */
1919        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1920                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1921
1922        /* Enable RSS spread to fragmented IP packets when supported */
1923        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
1924                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
1925
1926        /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1927        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1928                *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1929                dev->caps.eqe_size   = 64;
1930                dev->caps.eqe_factor = 1;
1931        } else {
1932                dev->caps.eqe_size   = 32;
1933                dev->caps.eqe_factor = 0;
1934        }
1935
1936        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1937                *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1938                dev->caps.cqe_size   = 64;
1939                dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1940        } else {
1941                dev->caps.cqe_size   = 32;
1942        }
1943
1944        /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
1945        if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1946            (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1947                dev->caps.eqe_size = cache_line_size();
1948                dev->caps.cqe_size = cache_line_size();
1949                dev->caps.eqe_factor = 0;
1950                MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1951                                      (ilog2(dev->caps.eqe_size) - 5)),
1952                         INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1953
1954                /* User still need to know to support CQE > 32B */
1955                dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1956        }
1957
1958        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1959                *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1960
1961        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
1962                u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);
1963
1964                strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1);
1965                mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
1966        }
1967
1968        /* QPC/EEC/CQC/EQC/RDMARC attributes */
1969
1970        MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
1971        MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
1972        MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
1973        MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
1974        MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
1975        MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
1976        MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
1977        MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
1978        MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
1979        MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
1980        MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
1981        MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
1982        MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1983
1984        /* steering attributes */
1985        if (dev->caps.steering_mode ==
1986            MLX4_STEERING_MODE_DEVICE_MANAGED) {
1987                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1988                        cpu_to_be32(1 <<
1989                                    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1990
1991                MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1992                MLX4_PUT(inbox, param->log_mc_entry_sz,
1993                         INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1994                MLX4_PUT(inbox, param->log_mc_table_sz,
1995                         INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1996                /* Enable Ethernet flow steering
1997                 * with udp unicast and tcp unicast
1998                 */
1999                if (dev->caps.dmfs_high_steer_mode !=
2000                    MLX4_STEERING_DMFS_A0_STATIC)
2001                        MLX4_PUT(inbox,
2002                                 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
2003                                 INIT_HCA_FS_ETH_BITS_OFFSET);
2004                MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
2005                         INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
2006                /* Enable IPoIB flow steering
2007                 * with udp unicast and tcp unicast
2008                 */
2009                MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
2010                         INIT_HCA_FS_IB_BITS_OFFSET);
2011                MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
2012                         INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
2013
2014                if (dev->caps.dmfs_high_steer_mode !=
2015                    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2016                        MLX4_PUT(inbox,
2017                                 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
2018                                       << 6)),
2019                                 INIT_HCA_FS_A0_OFFSET);
2020        } else {
2021                MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
2022                MLX4_PUT(inbox, param->log_mc_entry_sz,
2023                         INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
2024                MLX4_PUT(inbox, param->log_mc_hash_sz,
2025                         INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
2026                MLX4_PUT(inbox, param->log_mc_table_sz,
2027                         INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
2028                if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
2029                        MLX4_PUT(inbox, (u8) (1 << 3),
2030                                 INIT_HCA_UC_STEERING_OFFSET);
2031        }
2032
2033        /* TPT attributes */
2034
2035        MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
2036        MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
2037        MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
2038        MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
2039        MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
2040
2041        /* UAR attributes */
2042
2043        MLX4_PUT(inbox, param->uar_page_sz,     INIT_HCA_UAR_PAGE_SZ_OFFSET);
2044        MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
2045
2046        /* set parser VXLAN attributes */
2047        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
2048                u8 parser_params = 0;
2049                MLX4_PUT(inbox, parser_params,  INIT_HCA_VXLAN_OFFSET);
2050        }
2051
2052        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
2053                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2054
2055        if (err)
2056                mlx4_err(dev, "INIT_HCA returns %d\n", err);
2057
2058        mlx4_free_cmd_mailbox(dev, mailbox);
2059        return err;
2060}
2061
/* mlx4_QUERY_HCA - read the HCA's current configuration back from firmware.
 *
 * Issues the QUERY_HCA mailbox command and decodes the output into @param.
 * The output uses the same layout that mlx4_INIT_HCA() writes, so the
 * decode below reuses the INIT_HCA_*_OFFSET constants and must be kept in
 * sync with the encode side.
 *
 * Returns 0 on success or a negative errno from the command layer.
 */
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u64 qword_field;
	u32 dword_field;
	u16 word_field;
	u8 byte_field;
	int err;
	/* Inverse of the a0_dmfs_hw_steering[] table used on the INIT_HCA
	 * side: maps the 2-bit hardware A0 steering value back to the
	 * driver's enum.
	 */
	static const u8 a0_dmfs_query_hw_steering[] =  {
		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
		[2] = MLX4_STEERING_DMFS_A0_STATIC,
		[3] = MLX4_STEERING_DMFS_A0_DISABLE
	};

#define QUERY_HCA_GLOBAL_CAPS_OFFSET    0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET     0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	/* On a slave the command must go through the wrapper (not native) */
	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	/* Context base addresses share their qword with other encodings:
	 * the low 5 bits are masked off.  The log_num_* counts occupy only
	 * the low bits of their bytes, hence the masks below.
	 */
	MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
	param->qpc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
	param->log_num_qps = byte_field & 0x1f;
	MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
	param->srqc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
	param->log_num_srqs = byte_field & 0x1f;
	MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
	param->cqc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
	param->log_num_cqs = byte_field & 0x1f;
	MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
	param->altc_base = qword_field;
	MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
	param->auxc_base = qword_field;
	MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
	param->eqc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
	param->log_num_eqs = byte_field & 0x1f;
	MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
	param->num_sys_eqs = word_field & 0xfff;
	MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	param->rdmarc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
	param->log_rd_per_qp = byte_field & 0x7;

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		/* bit 3 of the UC steering byte selects B0 over A0 steering */
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}

	if (dword_field & (1 << 13))
		param->rss_ip_frags = 1;

	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		param->log_mc_entry_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		param->log_mc_table_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
		param->dmfs_high_steer_mode =
			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		param->log_mc_entry_sz = byte_field & 0x1f;
		MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		param->log_mc_hash_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		param->log_mc_table_sz = byte_field & 0x1f;
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
	if (byte_field) {
		param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
		param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
		/* stride fields encode log2(size) - 5, so decode as
		 * 1 << (field + 5); the EQE nibble sits in bits 7:4.
		 */
		param->cqe_size = 1 << ((byte_field &
					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
		param->eqe_size = 1 << (((byte_field &
					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
	}

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
	param->mw_enabled = byte_field >> 7;
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	param->log_mpt_sz = byte_field & 0x3f;
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
	param->log_uar_sz = byte_field & 0xf;

	/* phv_check enable */
	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
	if (byte_field & 0x2)
		param->phv_check_en = 1;
out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
2203
2204static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
2205{
2206        struct mlx4_cmd_mailbox *mailbox;
2207        __be32 *outbox;
2208        int err;
2209
2210        mailbox = mlx4_alloc_cmd_mailbox(dev);
2211        if (IS_ERR(mailbox)) {
2212                mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
2213                return PTR_ERR(mailbox);
2214        }
2215        outbox = mailbox->buf;
2216
2217        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2218                           MLX4_CMD_QUERY_HCA,
2219                           MLX4_CMD_TIME_CLASS_B,
2220                           !mlx4_is_slave(dev));
2221        if (err) {
2222                mlx4_warn(dev, "hca_core_clock update failed\n");
2223                goto out;
2224        }
2225
2226        MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
2227
2228out:
2229        mlx4_free_cmd_mailbox(dev, mailbox);
2230
2231        return err;
2232}
2233
2234/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
2235 * and real QP0 are active, so that the paravirtualized QP0 is ready
2236 * to operate */
2237static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
2238{
2239        struct mlx4_priv *priv = mlx4_priv(dev);
2240        /* irrelevant if not infiniband */
2241        if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
2242            priv->mfunc.master.qp0_state[port].qp0_active)
2243                return 1;
2244        return 0;
2245}
2246
/* mlx4_INIT_PORT_wrapper - INIT_PORT executed on the master on behalf of
 * a slave function.
 *
 * Reference-counts port initializations (init_port_ref) so the real
 * firmware INIT_PORT command is only issued for the first user:
 *  - non-IB (Ethernet) ports: send INIT_PORT to FW only when the port is
 *    not yet enabled by any function;
 *  - IB ports: for the master itself, defer the command until both proxy
 *    and real QP0 are active (check_qp0_state()); other slaves just have
 *    the port marked open in their init_port_mask.
 *
 * Returns 0 on success, -EINVAL for an invalid port, or the error from
 * the firmware command.
 */
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	/* Nothing to do if this slave already initialized the port */
	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
2289
/* mlx4_INIT_PORT - bring up @port via the INIT_PORT command.
 *
 * Older firmware (MLX4_FLAG_OLD_PORT_CMDS) expects the port attributes
 * (VL/width flags, MTU, GID and PKEY table sizes) in a mailbox; newer
 * firmware takes a parameterless, wrapped command.  On success the cached
 * HCA core clock is refreshed.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
/* Mailbox layout for the legacy INIT_PORT command */
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags,            INIT_PORT_FLAGS_OFFSET);

		/* ib_mtu_cap is an IB MTU enum; 128 << enum yields bytes */
		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		/* Wrapped: routed through mlx4_INIT_PORT_wrapper() in SRIOV */
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	if (!err)
		mlx4_hca_core_clock_update(dev);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
2344
/* mlx4_CLOSE_PORT_wrapper - CLOSE_PORT executed on the master on behalf
 * of a slave function.  Mirror image of mlx4_INIT_PORT_wrapper():
 * decrements init_port_ref and only sends the real CLOSE_PORT command to
 * firmware when the last user of the port is closing it (for IB ports,
 * only once QP0 is no longer active on the master).
 *
 * Returns 0 on success, -EINVAL for an invalid port, or the error from
 * the firmware command.
 */
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	/* Nothing to do if this slave never initialized the port */
	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Close the port in FW only for the last remaining user */
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}
2388
/* mlx4_CLOSE_PORT - close @port via the CLOSE_PORT command.  Issued as a
 * wrapped command, so in SRIOV mode it is reference-counted by
 * mlx4_CLOSE_PORT_wrapper() on the master.
 */
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2395
/* mlx4_CLOSE_HCA - shut the HCA down via the CLOSE_HCA command.
 * @panic is forwarded as the command's op_modifier (nonzero presumably
 * selects the firmware's panic-close variant -- per FW interface docs).
 */
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
2401
/* Mailbox layout of the CONFIG_DEV command (both set and get directions).
 * On set, update_flags selects which fields firmware should apply (see
 * the MLX4_*_DPORT / MLX4_DISABLE_RX_PORT bits defined below); the rsvd*
 * members pad the named fields to their hardware offsets.
 */
struct mlx4_config_dev {
	__be32	update_flags;		/* which fields to apply on set */
	__be32	rsvd1[3];
	__be16	vxlan_udp_dport;	/* VXLAN UDP destination port */
	__be16	rsvd2;
	__be16	roce_v2_entropy;
	__be16	roce_v2_udp_dport;	/* RoCE v2 UDP destination port */
	__be32	roce_flags;		/* e.g. CONFIG_DISABLE_RX_PORT */
	__be32	rsvd4[25];
	__be16	rsvd5;
	u8	rsvd6;
	u8	rx_checksum_val;	/* RX csum mode, one nibble per port */
};
2415
/* CONFIG_DEV update_flags bits: select which field(s) a set applies */
#define MLX4_VXLAN_UDP_DPORT (1 << 0)
#define MLX4_ROCE_V2_UDP_DPORT BIT(3)
#define MLX4_DISABLE_RX_PORT BIT(18)
2419
2420static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2421{
2422        int err;
2423        struct mlx4_cmd_mailbox *mailbox;
2424
2425        mailbox = mlx4_alloc_cmd_mailbox(dev);
2426        if (IS_ERR(mailbox))
2427                return PTR_ERR(mailbox);
2428
2429        memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
2430
2431        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
2432                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2433
2434        mlx4_free_cmd_mailbox(dev, mailbox);
2435        return err;
2436}
2437
2438static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2439{
2440        int err;
2441        struct mlx4_cmd_mailbox *mailbox;
2442
2443        mailbox = mlx4_alloc_cmd_mailbox(dev);
2444        if (IS_ERR(mailbox))
2445                return PTR_ERR(mailbox);
2446
2447        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
2448                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2449
2450        if (!err)
2451                memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
2452
2453        mlx4_free_cmd_mailbox(dev, mailbox);
2454        return err;
2455}
2456
/* Conversion between the HW values and the actual functionality.
 * The value represented by the array index,
 * and the functionality determined by the flags.
 * Indexed by the 3-bit per-port mode read from rx_checksum_val in
 * mlx4_config_dev_retrieval(); callers must bounds-check the index.
 */
static const u8 config_dev_csum_flags[] = {
	[0] =	0,
	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_L4,
	[3] =	MLX4_RX_CSUM_MODE_L4			|
		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_MULTI_VLAN
};
2470
/* mlx4_config_dev_retrieval - fetch CONFIG_DEV settings from firmware.
 *
 * Fills @params with the per-port RX checksum flags (decoded through
 * config_dev_csum_flags[]) and the configured VXLAN UDP port.
 *
 * Returns 0 on success, -EOPNOTSUPP if the device lacks CONFIG_DEV
 * support, -EINVAL if firmware reports an unknown checksum mode, or the
 * error from the command itself.
 */
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
			      struct mlx4_config_dev_params *params)
{
	struct mlx4_config_dev config_dev = {0};
	int err;
	u8 csum_mask;

/* rx_checksum_val holds a 3-bit mode per port: port1 in bits 2:0,
 * port2 in bits 6:4 */
#define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
		return -EOPNOTSUPP;

	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
	if (err)
		return err;

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	/* Reject modes the conversion table does not know about */
	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
		return -EINVAL;
	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
		return -EINVAL;
	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];

	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
2508
2509int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2510{
2511        struct mlx4_config_dev config_dev;
2512
2513        memset(&config_dev, 0, sizeof(config_dev));
2514        config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
2515        config_dev.vxlan_udp_dport = udp_port;
2516
2517        return mlx4_CONFIG_DEV_set(dev, &config_dev);
2518}
2519EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
2520
2521#define CONFIG_DISABLE_RX_PORT BIT(15)
2522int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2523{
2524        struct mlx4_config_dev config_dev;
2525
2526        memset(&config_dev, 0, sizeof(config_dev));
2527        config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
2528        if (dis)
2529                config_dev.roce_flags =
2530                        cpu_to_be32(CONFIG_DISABLE_RX_PORT);
2531
2532        return mlx4_CONFIG_DEV_set(dev, &config_dev);
2533}
2534
2535int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port)
2536{
2537        struct mlx4_config_dev config_dev;
2538
2539        memset(&config_dev, 0, sizeof(config_dev));
2540        config_dev.update_flags    = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT);
2541        config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port);
2542
2543        return mlx4_CONFIG_DEV_set(dev, &config_dev);
2544}
2545EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port);
2546
2547int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2548{
2549        struct mlx4_cmd_mailbox *mailbox;
2550        struct {
2551                __be32 v_port1;
2552                __be32 v_port2;
2553        } *v2p;
2554        int err;
2555
2556        mailbox = mlx4_alloc_cmd_mailbox(dev);
2557        if (IS_ERR(mailbox))
2558                return -ENOMEM;
2559
2560        v2p = mailbox->buf;
2561        v2p->v_port1 = cpu_to_be32(port1);
2562        v2p->v_port2 = cpu_to_be32(port2);
2563
2564        err = mlx4_cmd(dev, mailbox->dma, 0,
2565                       MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
2566                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2567
2568        mlx4_free_cmd_mailbox(dev, mailbox);
2569        return err;
2570}
2571
2572
/* mlx4_SET_ICM_SIZE - report the ICM size to firmware and learn how much
 * auxiliary ICM it requires.
 *
 * @icm_size:  ICM size, passed as the command's immediate input.
 * @aux_pages: out parameter; firmware reports the auxiliary area in
 *             MLX4_ICM_PAGE_SIZE units, converted below to system pages.
 *
 * Returns 0 on success or a negative errno from the command layer.
 */
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
2590
/* mlx4_NOP - issue a firmware NOP, e.g. to test the command interface.
 * Returns 0 on success or a negative errno.
 */
int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
2597
2598int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
2599                             const u32 offset[],
2600                             u32 value[], size_t array_len, u8 port)
2601{
2602        struct mlx4_cmd_mailbox *mailbox;
2603        u32 *outbox;
2604        size_t i;
2605        int ret;
2606
2607        mailbox = mlx4_alloc_cmd_mailbox(dev);
2608        if (IS_ERR(mailbox))
2609                return PTR_ERR(mailbox);
2610
2611        outbox = mailbox->buf;
2612
2613        ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
2614                           MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
2615                           MLX4_CMD_NATIVE);
2616        if (ret)
2617                goto out;
2618
2619        for (i = 0; i < array_len; i++) {
2620                if (offset[i] > MLX4_MAILBOX_SIZE) {
2621                        ret = -EINVAL;
2622                        goto out;
2623                }
2624
2625                MLX4_GET(value[i], outbox, offset[i]);
2626        }
2627
2628out:
2629        mlx4_free_cmd_mailbox(dev, mailbox);
2630        return ret;
2631}
2632EXPORT_SYMBOL(mlx4_query_diag_counters);
2633
/* mlx4_get_phys_port_id - cache each port's uplink GUID.
 *
 * Queries MOD_STAT_CFG (op_mod 0x2) once per port and stores the 64-bit
 * GUID in dev->caps.phys_port_id[port].  A failing port is logged and
 * skipped; the function keeps going and returns the last error seen
 * (0 if every port succeeded).
 */
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
/* Offsets within the MOD_STAT_CFG output mailbox */
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0X14
#define MOD_STAT_CFG_GUID_L	 0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		/* the port number is encoded in the input modifier */
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Fail to get port %d uplink guid\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
2670
/* WoL setup mode, encoded in bits 31:28 of the MOD_STAT_CFG input modifier */
#define MLX4_WOL_SETUP_MODE (5 << 28)
/* mlx4_wol_read - read the Wake-on-LAN configuration word for @port.
 * Uses MOD_STAT_CFG with op_mod 0x3; the result is returned through
 * @config as the command's immediate output.
 */
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
2681
/* mlx4_wol_write - write the Wake-on-LAN configuration word for @port.
 * Counterpart of mlx4_wol_read(); uses MOD_STAT_CFG with op_mod 0x1 and
 * passes @config as the command's immediate input.
 */
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
2690
/* Operation-request types returned by GET_OP_REQ (see mlx4_opreq_action) */
enum {
	ADD_TO_MCG = 0x26,
};
2694
2695
2696void mlx4_opreq_action(struct work_struct *work)
2697{
2698        struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
2699                                              opreq_task);
2700        struct mlx4_dev *dev = &priv->dev;
2701        int num_tasks = atomic_read(&priv->opreq_count);
2702        struct mlx4_cmd_mailbox *mailbox;
2703        struct mlx4_mgm *mgm;
2704        u32 *outbox;
2705        u32 modifier;
2706        u16 token;
2707        u16 type;
2708        int err;
2709        u32 num_qps;
2710        struct mlx4_qp qp;
2711        int i;
2712        u8 rem_mcg;
2713        u8 prot;
2714
2715#define GET_OP_REQ_MODIFIER_OFFSET      0x08
2716#define GET_OP_REQ_TOKEN_OFFSET         0x14
2717#define GET_OP_REQ_TYPE_OFFSET          0x1a
2718#define GET_OP_REQ_DATA_OFFSET          0x20
2719
2720        mailbox = mlx4_alloc_cmd_mailbox(dev);
2721        if (IS_ERR(mailbox)) {
2722                mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2723                return;
2724        }
2725        outbox = mailbox->buf;
2726
2727        while (num_tasks) {
2728                err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2729                                   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2730                                   MLX4_CMD_NATIVE);
2731                if (err) {
2732                        mlx4_err(dev, "Failed to retreive required operation: %d\n",
2733                                 err);
2734                        return;
2735                }
2736                MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
2737                MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
2738                MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
2739                type &= 0xfff;
2740
2741                switch (type) {
2742                case ADD_TO_MCG:
2743                        if (dev->caps.steering_mode ==
2744                            MLX4_STEERING_MODE_DEVICE_MANAGED) {
2745                                mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2746                                err = EPERM;
2747                                break;
2748                        }
2749                        mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
2750                                                  GET_OP_REQ_DATA_OFFSET);
2751                        num_qps = be32_to_cpu(mgm->members_count) &
2752                                  MGM_QPN_MASK;
2753                        rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
2754                        prot = ((u8 *)(&mgm->members_count))[0] >> 6;
2755
2756                        for (i = 0; i < num_qps; i++) {
2757                                qp.qpn = be32_to_cpu(mgm->qp[i]);
2758                                if (rem_mcg)
2759                                        err = mlx4_multicast_detach(dev, &qp,
2760                                                                    mgm->gid,
2761                                                                    prot, 0);
2762                                else
2763                                        err = mlx4_multicast_attach(dev, &qp,
2764                                                                    mgm->gid,
2765                                                                    mgm->gid[5]
2766                                                                    , 0, prot,
2767                                                                    NULL);
2768                                if (err)
2769                                        break;
2770                        }
2771                        break;
2772                default:
2773                        mlx4_warn(dev, "Bad type for required operation\n");
2774                        err = EINVAL;
2775                        break;
2776                }
2777                err = mlx4_cmd(dev, 0, ((u32) err |
2778                                        (__force u32)cpu_to_be32(token) << 16),
2779                               1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2780                               MLX4_CMD_NATIVE);
2781                if (err) {
2782                        mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2783                                 err);
2784                        goto out;
2785                }
2786                memset(outbox, 0, 0xffc);
2787                num_tasks = atomic_dec_return(&priv->opreq_count);
2788        }
2789
2790out:
2791        mlx4_free_cmd_mailbox(dev, mailbox);
2792}
2793
2794static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2795                                          struct mlx4_cmd_mailbox *mailbox)
2796{
2797#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET              0x10
2798#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET          0x20
2799#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET             0x40
2800#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET     0x70
2801
2802        u32 set_attr_mask, getresp_attr_mask;
2803        u32 trap_attr_mask, traprepress_attr_mask;
2804
2805        MLX4_GET(set_attr_mask, mailbox->buf,
2806                 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2807        mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2808                 set_attr_mask);
2809
2810        MLX4_GET(getresp_attr_mask, mailbox->buf,
2811                 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2812        mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2813                 getresp_attr_mask);
2814
2815        MLX4_GET(trap_attr_mask, mailbox->buf,
2816                 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2817        mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2818                 trap_attr_mask);
2819
2820        MLX4_GET(traprepress_attr_mask, mailbox->buf,
2821                 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2822        mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2823                 traprepress_attr_mask);
2824
2825        if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2826            traprepress_attr_mask)
2827                return 1;
2828
2829        return 0;
2830}
2831
2832int mlx4_config_mad_demux(struct mlx4_dev *dev)
2833{
2834        struct mlx4_cmd_mailbox *mailbox;
2835        int err;
2836
2837        /* Check if mad_demux is supported */
2838        if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2839                return 0;
2840
2841        mailbox = mlx4_alloc_cmd_mailbox(dev);
2842        if (IS_ERR(mailbox)) {
2843                mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
2844                return -ENOMEM;
2845        }
2846
2847        /* Query mad_demux to find out which MADs are handled by internal sma */
2848        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2849                           MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2850                           MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2851        if (err) {
2852                mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2853                          err);
2854                goto out;
2855        }
2856
2857        if (mlx4_check_smp_firewall_active(dev, mailbox))
2858                dev->flags |= MLX4_FLAG_SECURE_HOST;
2859
2860        /* Config mad_demux to handle all MADs returned by the query above */
2861        err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2862                       MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2863                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2864        if (err) {
2865                mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2866                goto out;
2867        }
2868
2869        if (dev->flags & MLX4_FLAG_SECURE_HOST)
2870                mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2871out:
2872        mlx4_free_cmd_mailbox(dev, mailbox);
2873        return err;
2874}
2875
/* Access Reg commands */
enum mlx4_access_reg_masks {
	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,	/* valid bits of the response status field */
	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,	/* valid bits of the method (read/write) field */
	MLX4_ACCESS_REG_LEN_MASK = 0x7ff	/* valid bits of the length field in len_const */
};
2882
/* In/out mailbox layout of the ACCESS_REG command. The field layout is the
 * firmware wire format (hence __packed); multi-byte fields are big-endian.
 */
struct mlx4_access_reg {
	__be16 constant1;	/* fixed header value, set by mlx4_ACCESS_REG() */
	u8 status;		/* response status, masked by MLX4_ACCESS_REG_STATUS_MASK */
	u8 resrvd1;
	__be16 reg_id;		/* register to access, e.g. MLX4_REG_ID_PTYS */
	u8 method;		/* enum mlx4_access_reg_method (read/write) */
	u8 constant2;		/* fixed header value, set by mlx4_ACCESS_REG() */
	__be32 resrvd2[2];
	__be16 len_const;	/* encoded register length, see mlx4_ACCESS_REG() */
	__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
	/* Register payload fills the rest of the mailbox. */
	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));
2896
2897/**
2898 * mlx4_ACCESS_REG - Generic access reg command.
2899 * @dev: mlx4_dev.
2900 * @reg_id: register ID to access.
2901 * @method: Access method Read/Write.
2902 * @reg_len: register length to Read/Write in bytes.
2903 * @reg_data: reg_data pointer to Read/Write From/To.
2904 *
2905 * Access ConnectX registers FW command.
2906 * Returns 0 on success and copies outbox mlx4_access_reg data
2907 * field into reg_data or a negative error code.
2908 */
2909static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2910                           enum mlx4_access_reg_method method,
2911                           u16 reg_len, void *reg_data)
2912{
2913        struct mlx4_cmd_mailbox *inbox, *outbox;
2914        struct mlx4_access_reg *inbuf, *outbuf;
2915        int err;
2916
2917        inbox = mlx4_alloc_cmd_mailbox(dev);
2918        if (IS_ERR(inbox))
2919                return PTR_ERR(inbox);
2920
2921        outbox = mlx4_alloc_cmd_mailbox(dev);
2922        if (IS_ERR(outbox)) {
2923                mlx4_free_cmd_mailbox(dev, inbox);
2924                return PTR_ERR(outbox);
2925        }
2926
2927        inbuf = inbox->buf;
2928        outbuf = outbox->buf;
2929
2930        inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
2931        inbuf->constant2 = 0x1;
2932        inbuf->reg_id = cpu_to_be16(reg_id);
2933        inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
2934
2935        reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
2936        inbuf->len_const =
2937                cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
2938                            ((0x3) << 12));
2939
2940        memcpy(inbuf->reg_data, reg_data, reg_len);
2941        err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2942                           MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2943                           MLX4_CMD_WRAPPED);
2944        if (err)
2945                goto out;
2946
2947        if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
2948                err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
2949                mlx4_err(dev,
2950                         "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
2951                         reg_id, err);
2952                goto out;
2953        }
2954
2955        memcpy(reg_data, outbuf->reg_data, reg_len);
2956out:
2957        mlx4_free_cmd_mailbox(dev, inbox);
2958        mlx4_free_cmd_mailbox(dev, outbox);
2959        return err;
2960}
2961
/* ConnectX registers IDs */
enum mlx4_reg_id {
	MLX4_REG_ID_PTYS = 0x5004,	/* Port Type and Speed register */
};
2966
2967/**
2968 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
2969 * register
2970 * @dev: mlx4_dev.
2971 * @method: Access method Read/Write.
2972 * @ptys_reg: PTYS register data pointer.
2973 *
2974 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
2975 * configuration
2976 * Returns 0 on success or a negative error code.
2977 */
2978int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2979                         enum mlx4_access_reg_method method,
2980                         struct mlx4_ptys_reg *ptys_reg)
2981{
2982        return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2983                               method, sizeof(*ptys_reg), ptys_reg);
2984}
2985EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
2986
2987int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
2988                            struct mlx4_vhcr *vhcr,
2989                            struct mlx4_cmd_mailbox *inbox,
2990                            struct mlx4_cmd_mailbox *outbox,
2991                            struct mlx4_cmd_info *cmd)
2992{
2993        struct mlx4_access_reg *inbuf = inbox->buf;
2994        u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
2995        u16 reg_id = be16_to_cpu(inbuf->reg_id);
2996
2997        if (slave != mlx4_master_func_num(dev) &&
2998            method == MLX4_ACCESS_REG_WRITE)
2999                return -EPERM;
3000
3001        if (reg_id == MLX4_REG_ID_PTYS) {
3002                struct mlx4_ptys_reg *ptys_reg =
3003                        (struct mlx4_ptys_reg *)inbuf->reg_data;
3004
3005                ptys_reg->local_port =
3006                        mlx4_slave_convert_port(dev, slave,
3007                                                ptys_reg->local_port);
3008        }
3009
3010        return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
3011                            0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
3012                            MLX4_CMD_NATIVE);
3013}
3014
3015static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
3016{
3017#define SET_PORT_GEN_PHV_VALID  0x10
3018#define SET_PORT_GEN_PHV_EN     0x80
3019
3020        struct mlx4_cmd_mailbox *mailbox;
3021        struct mlx4_set_port_general_context *context;
3022        u32 in_mod;
3023        int err;
3024
3025        mailbox = mlx4_alloc_cmd_mailbox(dev);
3026        if (IS_ERR(mailbox))
3027                return PTR_ERR(mailbox);
3028        context = mailbox->buf;
3029
3030        context->flags2 |=  SET_PORT_GEN_PHV_VALID;
3031        if (phv_bit)
3032                context->phv_en |=  SET_PORT_GEN_PHV_EN;
3033
3034        in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
3035        err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
3036                       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
3037                       MLX4_CMD_NATIVE);
3038
3039        mlx4_free_cmd_mailbox(dev, mailbox);
3040        return err;
3041}
3042
3043int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
3044{
3045        int err;
3046        struct mlx4_func_cap func_cap;
3047
3048        memset(&func_cap, 0, sizeof(func_cap));
3049        err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
3050        if (!err)
3051                *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
3052        return err;
3053}
3054EXPORT_SYMBOL(get_phv_bit);
3055
3056int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
3057{
3058        int ret;
3059
3060        if (mlx4_is_slave(dev))
3061                return -EPERM;
3062
3063        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3064            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3065                ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
3066                if (!ret)
3067                        dev->caps.phv_bit[port] = new_val;
3068                return ret;
3069        }
3070
3071        return -EOPNOTSUPP;
3072}
3073EXPORT_SYMBOL(set_phv_bit);
3074
3075int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
3076                                      bool *vlan_offload_disabled)
3077{
3078        struct mlx4_func_cap func_cap;
3079        int err;
3080
3081        memset(&func_cap, 0, sizeof(func_cap));
3082        err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
3083        if (!err)
3084                *vlan_offload_disabled =
3085                        !!(func_cap.flags0 &
3086                           QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
3087        return err;
3088}
3089EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
3090
3091void mlx4_replace_zero_macs(struct mlx4_dev *dev)
3092{
3093        int i;
3094        u8 mac_addr[ETH_ALEN];
3095
3096        dev->port_random_macs = 0;
3097        for (i = 1; i <= dev->caps.num_ports; ++i)
3098                if (!dev->caps.def_mac[i] &&
3099                    dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
3100                        eth_random_addr(mac_addr);
3101                        dev->port_random_macs |= 1 << i;
3102                        dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
3103                }
3104}
3105EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
3106