linux/drivers/infiniband/hw/mlx5/mad.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"

enum {
        MLX5_IB_VENDOR_CLASS1 = 0x9,
        MLX5_IB_VENDOR_CLASS2 = 0xa
};

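/* Thin wrapper around the firmware MAD_IFC command.  Bit 0 of
 * op_modifier disables the M_Key check and bit 1 disables the B_Key
 * check; both checks are skipped when no completion info (in_wc) is
 * available, since a key-violation trap could not be addressed anyway.
 */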
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
                 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad)
{
        u8 op_modifier = 0;

        /* Key check traps can't be generated unless we have in_wc to
         * tell us where to send the trap.
         */
        if (ignore_mkey || !in_wc)
                op_modifier |= 0x1;
        if (ignore_bkey || !in_wc)
                op_modifier |= 0x2;

        return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

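/* Validate and forward an incoming MAD to firmware.  Only the SMP
 * (LID-routed and directed-route), PerfMgmt, CongMgmt and the two
 * Mellanox vendor classes are passed on, and only for methods the
 * firmware agents handle; everything else is accepted without a reply.
 */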
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        u16 slid;
        int err;

        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
                        return IB_MAD_RESULT_SUCCESS;

                /* Don't process SMInfo queries -- the SMA can't handle them.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1   ||
                   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2   ||
                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
                if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
        } else {
                return IB_MAD_RESULT_SUCCESS;
        }

        err = mlx5_MAD_IFC(to_mdev(ibdev),
                           mad_flags & IB_MAD_IGNORE_MKEY,
                           mad_flags & IB_MAD_IGNORE_BKEY,
                           port_num, in_wc, in_grh, in_mad, out_mad);
        if (err)
                return IB_MAD_RESULT_FAILURE;

        /* set return bit in status of directed route responses */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

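/* Fill an extended PortCounters (PMA) reply from a QUERY_VPORT_COUNTER
 * response.  PortXmitData/PortRcvData count 32-bit words, hence the
 * >> 2 on the octet counters; unicast and multicast are summed because
 * the hardware reports them separately.
 */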
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
                               void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)   \
        (MLX5_GET64(query_vport_counter_out, p, cntr1) + \
        MLX5_GET64(query_vport_counter_out, p, cntr2))

        pma_cnt_ext->port_xmit_data =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
                                         transmitted_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_rcv_data =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
                                         received_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_xmit_packets =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
                                         transmitted_ib_multicast.packets));
        pma_cnt_ext->port_rcv_packets =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
                                         received_ib_multicast.packets));
        pma_cnt_ext->port_unicast_xmit_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, transmitted_ib_unicast.packets);
        pma_cnt_ext->port_unicast_rcv_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, received_ib_unicast.packets);
        pma_cnt_ext->port_multicast_xmit_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, transmitted_ib_multicast.packets);
        pma_cnt_ext->port_multicast_rcv_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, received_ib_multicast.packets);
}

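/* Fill a classic PortCounters reply from a PPCNT register dump.  Only
 * the error counters are copied; the traffic counters stay zero (the
 * reply buffer is pre-zeroed) as explained in the comment below.
 */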
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
                           void *out)
{
        /* Traffic counters will be reported in
         * their 64-bit form via ib_pma_portcounters_ext by default.
         */
        void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
                                     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) {               \
        counter_var = MLX5_GET_BE(typeof(counter_var),                  \
                                  ib_port_cntrs_grp_data_layout,        \
                                  out_pma, counter_name);               \
        }

        MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
                             symbol_error_counter);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
                             link_error_recovery_counter);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
                             link_downed_counter);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
                             port_rcv_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
                             port_rcv_remote_physical_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
                             port_rcv_switch_relay_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
                             port_xmit_discards);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
                             port_xmit_constraint_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
                             port_rcv_constraint_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
                             link_overrun_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
                             vl_15_dropped);
}

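/* Answer a PerfMgmt GET locally instead of forwarding it to the
 * firmware: ClassPortInfo advertises extended-width counter support,
 * PortCountersExtended is served from the vport counters and
 * PortCounters from the PPCNT register.  The attribute payload starts
 * 40 bytes into out_mad->data, per the PMA MAD layout.
 */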
static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
                           const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        int err;
        void *out_cnt;

        /* Declare support for extended counters */
        if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
                struct ib_class_port_info cpi = {};

                cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
                memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
        }

        if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
                struct ib_pma_portcounters_ext *pma_cnt_ext =
                        (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
                int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

                out_cnt = mlx5_vzalloc(sz);
                if (!out_cnt)
                        return IB_MAD_RESULT_FAILURE;

                err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
                                                    port_num, out_cnt, sz);
                if (!err)
                        pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
        } else {
                struct ib_pma_portcounters *pma_cnt =
                        (struct ib_pma_portcounters *)(out_mad->data + 40);
                int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

                out_cnt = mlx5_vzalloc(sz);
                if (!out_cnt)
                        return IB_MAD_RESULT_FAILURE;

                err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
                                               out_cnt, sz);
                if (!err)
                        pma_cnt_assign(pma_cnt, out_cnt);
        }

        kvfree(out_cnt);
        if (err)
                return IB_MAD_RESULT_FAILURE;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

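/* Top-level MAD handler, registered as the device's ->process_mad
 * callback.  PerfMgmt GETs are answered from hardware counters when
 * the device exposes vport counters; all other MADs take the firmware
 * MAD_IFC path.
 */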
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        memset(out_mad->data, 0, sizeof(out_mad->data));

        if (MLX5_CAP_GEN(mdev, vport_counters) &&
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
            in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
                return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
        } else {
                return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
                                   in_mad, out_mad);
        }
}

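/* Probe the Mellanox vendor-specific ExtendedPortInfo SMP attribute
 * and cache the result in port_caps; mlx5_query_mad_ifc_port() later
 * uses the cached flag to detect FDR-10 links.
 */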
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u16 packet_error;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

        packet_error = be16_to_cpu(out_mad->status);

        dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
                MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

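/* Issue a NodeInfo SMP query and return the raw reply; the helpers
 * below each extract a single field from it.
 */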
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
                                          struct ib_smp *out_mad)
{
        struct ib_smp *in_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        if (!in_mad)
                return -ENOMEM;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
                           out_mad);

        kfree(in_mad);
        return err;
}

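/* SystemImageGUID sits at offset 4 of the NodeInfo attribute data. */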
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
                                         __be64 *sys_image_guid)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
        kfree(out_mad);

        return err;
}

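/* PartitionCap, the number of supported P_Keys, sits at offset 28 of
 * NodeInfo.
 */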
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
                                 u16 *max_pkeys)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
        kfree(out_mad);

        return err;
}

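/* The 24-bit VendorID occupies bytes 37-39 of NodeInfo; the 32-bit
 * load also picks up the LocalPortNum byte, and the 0xffff mask keeps
 * the low 16 bits, which is enough for Mellanox's 0x02c9 OUI.
 */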
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
                                 u32 *vendor_id)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
        kfree(out_mad);

        return err;
}

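/* NodeDescription is a plain string of up to IB_DEVICE_NODE_DESC_MAX
 * (64) bytes.
 */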
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

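/* NodeGUID sits at offset 12 of the NodeInfo attribute data. */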
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(node_guid, out_mad->data + 12, 8);
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

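/* P_Keys are read one 32-entry PKeyTable block at a time: attr_mod
 * selects the block, index % 32 the entry within it.
 */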
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
                            u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

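/* A full GID is assembled from two SMP queries: the 8-byte subnet
 * prefix from PortInfo and the 8-byte GUID from the matching GUIDInfo
 * block (eight GUIDs per block, selected via attr_mod).
 */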
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
                            union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

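/* Build ib_port_attr from a PortInfo SMP, with two speed fix-ups: the
 * extended speed field is consulted when the port advertises extended
 * speeds, and an apparently-QDR link is re-checked against the vendor
 * ExtendedPortInfo attribute to distinguish FDR-10.
 */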
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
                            struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int err = -ENOMEM;

        if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
                mlx5_ib_warn(dev, "invalid port number %d\n", port);
                return -EINVAL;
        }

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof(*props));

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err) {
                mlx5_ib_warn(dev, "err %d\n", err);
                goto out;
        }

        props->lid              = be16_to_cpup((__be16 *)(out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *)(out_mad->data + 18));
        props->sm_sl            = out_mad->data[36] & 0xf;
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *)(out_mad->data + 20));
        props->gid_tbl_len      = out_mad->data[50];
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mdev->port_caps[port - 1].pkey_table_len;
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *)(out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *)(out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
        props->active_speed     = out_mad->data[35] >> 4;
        props->max_mtu          = out_mad->data[41] & 0xf;
        props->active_mtu       = out_mad->data[36] >> 4;
        props->subnet_timeout   = out_mad->data[51] & 0x1f;
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = 16; /* FDR */
                        break;
                case 2:
                        props->active_speed = 32; /* EDR */
                        break;
                }
        }

        /* If the reported active speed is QDR, check if it is FDR-10 */
        if (props->active_speed == 4) {
                if (mdev->port_caps[port - 1].ext_port_cap &
                    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                        init_query_mad(in_mad);
                        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
                        in_mad->attr_mod = cpu_to_be32(port);

                        err = mlx5_MAD_IFC(dev, 1, 1, port,
                                           NULL, NULL, in_mad, out_mad);
                        if (err)
                                goto out;

                        /* Check LinkSpeedActive for FDR-10 */
                        if (out_mad->data[15] & 0x1)
                                props->active_speed = 8;
                }
        }

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}