linux/drivers/infiniband/core/nldev.c
/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

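/* Validation policy for the netlink attributes accepted by nldev requests. */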
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
        [RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_DEV_NAME]      = { .type = NLA_NUL_STRING,
                                            .len = IB_DEVICE_NAME_MAX - 1},
        [RDMA_NLDEV_ATTR_PORT_INDEX]    = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_FW_VERSION]    = { .type = NLA_NUL_STRING,
                                            .len = IB_FW_VERSION_NAME_MAX - 1},
        [RDMA_NLDEV_ATTR_NODE_GUID]     = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_LID]           = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_SM_LID]        = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_LMC]           = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_PORT_STATE]    = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
};

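/*
 * Encode the device-wide attributes of @device (index, name, last port
 * index, capability flags, optional FW version, GUIDs and node type)
 * into @msg. Returns -EMSGSIZE if the message runs out of room.
 */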
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
        char fw[IB_FW_VERSION_NAME_MAX];

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
                return -EMSGSIZE;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
                return -EMSGSIZE;
        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
                return -EMSGSIZE;

        BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
                              device->attrs.device_cap_flags, 0))
                return -EMSGSIZE;

        ib_get_device_fw_str(device, fw);
        /* Device without FW has strlen(fw) = 0 */
        if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
                return -EMSGSIZE;

        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
                              be64_to_cpu(device->node_guid), 0))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
                              be64_to_cpu(device->attrs.sys_image_guid), 0))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
                return -EMSGSIZE;
        return 0;
}

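/*
 * Encode the attributes of port @port of @device into @msg. The IB-only
 * attributes (subnet prefix, LID, SM LID and LMC) are emitted only when
 * the port runs the InfiniBand protocol.
 */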
static int fill_port_info(struct sk_buff *msg,
                          struct ib_device *device, u32 port)
{
        struct ib_port_attr attr;
        int ret;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
                return -EMSGSIZE;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
                return -EMSGSIZE;
        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
                return -EMSGSIZE;

        ret = ib_query_port(device, port, &attr);
        if (ret)
                return ret;

        BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
                              (u64)attr.port_cap_flags, 0))
                return -EMSGSIZE;
        if (rdma_protocol_ib(device, port) &&
            nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
                              attr.subnet_prefix, 0))
                return -EMSGSIZE;
        if (rdma_protocol_ib(device, port)) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
                        return -EMSGSIZE;
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
                        return -EMSGSIZE;
        }
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
                return -EMSGSIZE;
        return 0;
}

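/*
 * RDMA_NLDEV_CMD_GET (doit): look up the device named by
 * RDMA_NLDEV_ATTR_DEV_INDEX and unicast a message carrying its
 * attributes back to the requester.
 */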
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

        device = __ib_device_get_by_index(index);
        if (!device)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);

        err = fill_dev_info(msg, device);
        if (err) {
                nlmsg_free(msg);
                return err;
        }

        nlmsg_end(msg, nlh);

        return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
}

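/*
 * Per-device callback of the device dump: skip devices already delivered
 * in a previous dump pass (cb->args[0]) and append one RDMA_NLDEV_CMD_GET
 * message for each remaining device.
 */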
static int _nldev_get_dumpit(struct ib_device *device,
                             struct sk_buff *skb,
                             struct netlink_callback *cb,
                             unsigned int idx)
{
        int start = cb->args[0];
        struct nlmsghdr *nlh;

        if (idx < start)
                return 0;

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, NLM_F_MULTI);

        if (fill_dev_info(skb, device)) {
                nlmsg_cancel(skb, nlh);
                goto out;
        }

        nlmsg_end(skb, nlh);

        idx++;

out:    cb->args[0] = idx;
        return skb->len;
}

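/* RDMA_NLDEV_CMD_GET (dumpit): iterate over all registered devices. */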
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        /*
         * There is no need to take a lock here, because we rely on
         * ib_core's lists_rwsem.
         */
        return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

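/*
 * RDMA_NLDEV_CMD_PORT_GET (doit): both the device index and the port
 * index attributes are mandatory; replies with the attributes of that
 * single port.
 */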
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        u32 port;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = __ib_device_get_by_index(index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port))
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_PORT_GET),
                        0, 0);

        err = fill_port_info(msg, device, port);
        if (err) {
                nlmsg_free(msg);
                return err;
        }

        nlmsg_end(msg, nlh);

        return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
}

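/*
 * RDMA_NLDEV_CMD_PORT_GET (dumpit): dump all ports of the device named
 * by the mandatory RDMA_NLDEV_ATTR_DEV_INDEX attribute.
 */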
static int nldev_port_get_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        int start = cb->args[0];
        struct nlmsghdr *nlh;
        u32 idx = 0;
        u32 ifindex;
        int err;
        u32 p;

        err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, NULL);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = __ib_device_get_by_index(ifindex);
        if (!device)
                return -EINVAL;

        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                /*
                 * The dumpit function returns all information starting
                 * from a specific index. That index is taken from the
                 * netlink request sent by the user and is available in
                 * cb->args[0].
                 *
                 * Usually the user does not fill this field, which
                 * causes everything to be returned.
                 */
                if (idx < start) {
                        idx++;
                        continue;
                }

                nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq,
                                RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                                 RDMA_NLDEV_CMD_PORT_GET),
                                0, NLM_F_MULTI);

                if (fill_port_info(skb, device, p)) {
                        nlmsg_cancel(skb, nlh);
                        goto out;
                }
                idx++;
                nlmsg_end(skb, nlh);
        }

out:    cb->args[0] = idx;
        return skb->len;
}

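/*
 * Command dispatch table, indexed by RDMA_NLDEV_CMD_* values: .doit
 * handles targeted requests and .dump handles NLM_F_DUMP requests.
 */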
static const struct rdma_nl_cbs nldev_cb_table[] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
                .dump = nldev_get_dumpit,
        },
        [RDMA_NLDEV_CMD_PORT_GET] = {
                .doit = nldev_port_get_doit,
                .dump = nldev_port_get_dumpit,
        },
};

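/* Register/unregister the NLDEV client with the RDMA netlink subsystem. */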
void __init nldev_init(void)
{
        rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
        rdma_nl_unregister(RDMA_NL_NLDEV);
}

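/* Allow autoloading when userspace sends a message to the NLDEV subsystem. */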
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);