linux/drivers/infiniband/core/netlink.c
/*
 * Copyright (c) 2017 Mellanox Technologies Inc.  All rights reserved.
 * Copyright (c) 2010 Voltaire Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/export.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
#include <linux/module.h>
#include "core_priv.h"

static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
static struct {
        const struct rdma_nl_cbs   *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];

int rdma_nl_chk_listeners(unsigned int group)
{
        return (netlink_has_listeners(nls, group)) ? 0 : -1;
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
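
#if 0   /* Illustrative sketch, not part of the original file. */
/*
 * rdma_nl_chk_listeners() returns 0 when the given netlink multicast group
 * has at least one listener and -1 otherwise. A typical caller uses it to
 * avoid building a request that nobody would receive, assuming the
 * RDMA_NL_GROUP_LS constant from <rdma/rdma_netlink.h> for the group
 * consumed by the user-space path-record resolver:
 */
static bool example_ls_daemon_present(void)
{
        return rdma_nl_chk_listeners(RDMA_NL_GROUP_LS) == 0;
}
#endif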

static bool is_nl_msg_valid(unsigned int type, unsigned int op)
{
        static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
                [RDMA_NL_RDMA_CM] = RDMA_NL_RDMA_CM_NUM_OPS,
                [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
                [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
                [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
        };

        /*
         * This BUILD_BUG_ON is intended to catch the addition of a new
         * RDMA netlink client without updating the array above.
         */
        BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);

        if (type >= RDMA_NL_NUM_CLIENTS)
                return false;

        return op < max_num_ops[type];
}

static bool is_nl_valid(unsigned int type, unsigned int op)
{
        const struct rdma_nl_cbs *cb_table;

        if (!is_nl_msg_valid(type, op))
                return false;

        if (!rdma_nl_types[type].cb_table) {
                mutex_unlock(&rdma_nl_mutex);
                request_module("rdma-netlink-subsys-%d", type);
                mutex_lock(&rdma_nl_mutex);
        }

        cb_table = rdma_nl_types[type].cb_table;

        if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
                return false;
        return true;
}
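
#if 0   /* Illustrative sketch, not part of the original file. */
/*
 * The request_module() call above resolves a module alias of the form
 * "rdma-netlink-subsys-<client index>", so a client module can make itself
 * auto-loadable by exporting that alias. The index 5 below is only an
 * example value; <rdma/rdma_netlink.h> is assumed to also provide a
 * MODULE_ALIAS_RDMA_NETLINK() wrapper for the same purpose.
 */
MODULE_ALIAS("rdma-netlink-subsys-5");
#endif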

void rdma_nl_register(unsigned int index,
                      const struct rdma_nl_cbs cb_table[])
{
        mutex_lock(&rdma_nl_mutex);
        if (!is_nl_msg_valid(index, 0)) {
                /*
                 * Clients are not interested in the success or failure of
                 * this call; they only want the error printed so they can
                 * continue their initialization. Warn here, because
                 * reaching this point is a programmer's error.
                 */
                mutex_unlock(&rdma_nl_mutex);
                WARN(true,
                     "Invalid index %u was supplied to RDMA netlink\n",
                     index);
                return;
        }

        if (rdma_nl_types[index].cb_table) {
                mutex_unlock(&rdma_nl_mutex);
                WARN(true,
                     "The %u index is already registered in RDMA netlink\n",
                     index);
                return;
        }

        rdma_nl_types[index].cb_table = cb_table;
        mutex_unlock(&rdma_nl_mutex);
}
EXPORT_SYMBOL(rdma_nl_register);

void rdma_nl_unregister(unsigned int index)
{
        mutex_lock(&rdma_nl_mutex);
        rdma_nl_types[index].cb_table = NULL;
        mutex_unlock(&rdma_nl_mutex);
}
EXPORT_SYMBOL(rdma_nl_unregister);
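
#if 0   /* Illustrative sketch, not part of the original file. */
/*
 * Minimal client of the registration API above. The op index 0 and its
 * handler are hypothetical; a real client provides one entry per op
 * defined for its index in <rdma/rdma_netlink.h>, and the index passed to
 * rdma_nl_register() must not already be registered.
 */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                        struct netlink_ext_ack *extack)
{
        return 0;       /* handle a single request message */
}

static const struct rdma_nl_cbs example_cb_table[] = {
        [0] = {
                .doit   = example_doit,
                .flags  = RDMA_NL_ADMIN_PERM,   /* require CAP_NET_ADMIN */
        },
};

static int __init example_client_init(void)
{
        rdma_nl_register(RDMA_NL_NLDEV, example_cb_table);
        return 0;
}

static void __exit example_client_exit(void)
{
        rdma_nl_unregister(RDMA_NL_NLDEV);
}
#endif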

void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
                   int len, int client, int op, int flags)
{
        *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
        if (!*nlh)
                return NULL;
        return nlmsg_data(*nlh);
}
EXPORT_SYMBOL(ibnl_put_msg);

int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
                  int len, void *data, int type)
{
        if (nla_put(skb, type, len, data)) {
                nlmsg_cancel(skb, nlh);
                return -EMSGSIZE;
        }
        return 0;
}
EXPORT_SYMBOL(ibnl_put_attr);
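
#if 0   /* Illustrative sketch, not part of the original file. */
/*
 * Typical producer-side use of the helpers above: allocate an skb, start a
 * message whose nlmsg_type encodes <client, op>, append an attribute, close
 * the message and hand it to one of the send helpers below. The op value 0,
 * the attribute type 1 and the payload are placeholders.
 */
static int example_send(u32 dst_portid)
{
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        u32 payload = 42;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        if (!ibnl_put_msg(skb, &nlh, 0, 0, RDMA_NL_NLDEV, 0, NLM_F_REQUEST)) {
                nlmsg_free(skb);
                return -EMSGSIZE;
        }

        if (ibnl_put_attr(skb, nlh, sizeof(payload), &payload, 1)) {
                nlmsg_free(skb);
                return -EMSGSIZE;
        }

        nlmsg_end(skb, nlh);
        return rdma_nl_unicast(skb, dst_portid);        /* consumes the skb */
}
#endif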

static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct netlink_ext_ack *extack)
{
        int type = nlh->nlmsg_type;
        unsigned int index = RDMA_NL_GET_CLIENT(type);
        unsigned int op = RDMA_NL_GET_OP(type);
        const struct rdma_nl_cbs *cb_table;

        if (!is_nl_valid(index, op))
                return -EINVAL;

        cb_table = rdma_nl_types[index].cb_table;

        if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        /*
         * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
         * mistakenly call the .dump() function.
         */
        if (index == RDMA_NL_LS) {
                if (cb_table[op].doit)
                        return cb_table[op].doit(skb, nlh, extack);
                return -EINVAL;
        }
        /* FIXME: Convert IWCM to properly handle doit callbacks */
        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
            index == RDMA_NL_IWCM) {
                struct netlink_dump_control c = {
                        .dump = cb_table[op].dump,
                };
                if (c.dump)
                        return netlink_dump_start(nls, skb, nlh, &c);
                return -EINVAL;
        }

        if (cb_table[op].doit)
                return cb_table[op].doit(skb, nlh, extack);

        return 0;
}
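
#if 0   /* Illustrative sketch, not part of the original file. */
/*
 * The dispatch above depends on nlmsg_type carrying both the client index
 * and the op, packed by RDMA_NL_GET_TYPE() and unpacked by
 * RDMA_NL_GET_CLIENT()/RDMA_NL_GET_OP() from <rdma/rdma_netlink.h>
 * (client index in the high bits, op in the low bits):
 */
static void example_type_packing(void)
{
        u16 type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET);

        WARN_ON(RDMA_NL_GET_CLIENT(type) != RDMA_NL_NLDEV);
        WARN_ON(RDMA_NL_GET_OP(type) != RDMA_NLDEV_CMD_GET);
}
#endif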

/*
 * This function is similar to netlink_rcv_skb with one exception: it also
 * calls the callback for netlink messages that lack the NLM_F_REQUEST flag.
 * Such messages are intended for the RDMA_NL_LS consumer, so this is allowed
 * for that consumer only.
 */
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
                                                   struct nlmsghdr *,
                                                   struct netlink_ext_ack *))
{
        struct netlink_ext_ack extack = {};
        struct nlmsghdr *nlh;
        int err;

        while (skb->len >= nlmsg_total_size(0)) {
                int msglen;

                nlh = nlmsg_hdr(skb);
                err = 0;

                if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
                        return 0;

                /*
                 * Generally speaking, only requests are handled by the
                 * kernel, but RDMA_NL_LS is different because it runs a
                 * reversed netlink scheme: the kernel initiates messages
                 * and waits for replies carrying data to keep the
                 * pathrecord cache in sync.
                 */
                if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
                    (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
                        goto ack;

                /* Skip control messages */
                if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
                        goto ack;

                err = cb(skb, nlh, &extack);
                if (err == -EINTR)
                        goto skip;

ack:
                if (nlh->nlmsg_flags & NLM_F_ACK || err)
                        netlink_ack(skb, nlh, err, &extack);

skip:
                msglen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (msglen > skb->len)
                        msglen = skb->len;
                skb_pull(skb, msglen);
        }

        return 0;
}

static void rdma_nl_rcv(struct sk_buff *skb)
{
        mutex_lock(&rdma_nl_mutex);
        rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
        mutex_unlock(&rdma_nl_mutex);
}

int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
{
        int err;

        err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
        return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast);

int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
{
        int err;

        err = netlink_unicast(nls, skb, pid, 0);
        return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast_wait);

int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
{
        return nlmsg_multicast(nls, skb, 0, group, flags);
}
EXPORT_SYMBOL(rdma_nl_multicast);

int __init rdma_nl_init(void)
{
        struct netlink_kernel_cfg cfg = {
                .input  = rdma_nl_rcv,
        };

        nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
        if (!nls)
                return -ENOMEM;

        nls->sk_sndtimeo = 10 * HZ;
        return 0;
}

void rdma_nl_exit(void)
{
        int idx;

        for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
                rdma_nl_unregister(idx);

        netlink_kernel_release(nls);
}

MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
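
#if 0   /* Illustrative user-space sketch, not part of this kernel file. */
/*
 * The MODULE_ALIAS_NET_PF_PROTO() line above lets the kernel auto-load this
 * module when a process opens a NETLINK_RDMA socket, roughly like this:
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>      /* NETLINK_RDMA */

int main(void)
{
        struct sockaddr_nl addr;
        int fd;

        /* Creating the socket triggers the module auto-load. */
        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.nl_family = AF_NETLINK;
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("bind");
                return 1;
        }

        /* Requests built with RDMA_NL_GET_TYPE() can now be exchanged. */
        return 0;
}
#endif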