linux/drivers/infiniband/core/core_priv.h
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/restrack.h>
#include "mad_priv.h"

/* Total number of ports combined across all struct ib_device instances */
#define RDMA_MAX_PORTS 8192

struct pkey_index_qp_list {
        struct list_head    pkey_index_list;
        u16                 pkey_index;
        /* Lock to hold while iterating the qp_list. */
        spinlock_t          qp_list_lock;
        struct list_head    qp_list;
};
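
/*
 * Usage sketch (illustrative, not part of this header): walks of qp_list are
 * expected to hold qp_list_lock; "entry" and "member" below are hypothetical
 * placeholders for the caller's element type and its list_head field.
 *
 *      spin_lock(&pkey->qp_list_lock);
 *      list_for_each_entry(entry, &pkey->qp_list, member)
 *              ...;
 *      spin_unlock(&pkey->qp_list_lock);
 */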

/**
 * struct rdma_dev_net - rdma net namespace metadata for a net
 * @nl_sock:    Pointer to netlink socket
 * @net:        Pointer to owner net namespace
 * @id:         xarray id to identify the net namespace.
 */
struct rdma_dev_net {
        struct sock *nl_sock;
        possible_net_t net;
        u32 id;
};

extern const struct attribute_group ib_dev_attr_group;
extern bool ib_devices_shared_netns;
extern unsigned int rdma_dev_net_id;

static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
{
        return net_generic(net, rdma_dev_net_id);
}
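
/*
 * Usage sketch (illustrative, not part of this header): rdma per-net state is
 * looked up through net_generic() keyed by rdma_dev_net_id, so callers can do
 * e.g.:
 *
 *      struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
 *
 *      pr_debug("rdma dev net id %u\n", rnet->id);
 */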

int ib_device_register_sysfs(struct ib_device *device);
void ib_device_unregister_sysfs(struct ib_device *device);
int ib_device_rename(struct ib_device *ibdev, const char *name);
int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);

typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
              struct net_device *idev, void *cookie);

typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
                                   struct net_device *idev, void *cookie);

struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
                                        unsigned int port);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie);
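
/*
 * Usage sketch (illustrative, not part of this header): a caller passes a
 * filter that selects (device, port, netdev) tuples plus a callback invoked
 * for every match; both function names below are hypothetical.
 *
 *      static bool match_any(struct ib_device *device, u8 port,
 *                            struct net_device *idev, void *cookie)
 *      {
 *              return true;
 *      }
 *
 *      static void dump_port(struct ib_device *device, u8 port,
 *                            struct net_device *idev, void *cookie)
 *      {
 *              pr_debug("%s port %u -> %s\n", dev_name(&device->dev), port,
 *                       idev ? idev->name : "none");
 *      }
 *
 *      ib_enum_all_roce_netdevs(match_any, NULL, dump_port, NULL);
 */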

typedef int (*nldev_callback)(struct ib_device *device,
                              struct sk_buff *skb,
                              struct netlink_callback *cb,
                              unsigned int idx);

int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
                     struct netlink_callback *cb);

struct ib_client_nl_info {
        struct sk_buff *nl_msg;
        struct device *cdev;
        unsigned int port;
        u64 abi;
};
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
                          struct ib_client_nl_info *res);

enum ib_cache_gid_default_mode {
        IB_CACHE_GID_DEFAULT_MODE_SET,
        IB_CACHE_GID_DEFAULT_MODE_DELETE
};

int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
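
/*
 * Usage sketch (illustrative, not part of this header): RoCE GID management
 * typically queries the GID types a port supports and then installs (or
 * removes) the default GIDs for a netdev with that mask:
 *
 *      unsigned long gid_type_mask;
 *
 *      gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
 *      ib_cache_gid_set_default_gid(ib_dev, port, ndev, gid_type_mask,
 *                                   IB_CACHE_GID_DEFAULT_MODE_SET);
 */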

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
void ib_dispatch_event_clients(struct ib_event *event);

#ifdef CONFIG_CGROUP_RDMA
void ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
                         struct ib_device *device,
                         enum rdmacg_resource_type resource_index);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
                        struct ib_device *device,
                        enum rdmacg_resource_type resource_index);
#else
static inline void ib_device_register_rdmacg(struct ib_device *device)
{
}

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{
}

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
                                       struct ib_device *device,
                                       enum rdmacg_resource_type resource_index)
{
        return 0;
}

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
                                      struct ib_device *device,
                                      enum rdmacg_resource_type resource_index)
{
}
#endif
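
/*
 * Usage sketch (illustrative, not part of this header): creation paths charge
 * the rdma cgroup before setting up a tracked object and uncharge on failure
 * or teardown; with the !CONFIG_CGROUP_RDMA stubs above this collapses to a
 * no-op.
 *
 *      ret = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev,
 *                                 RDMACG_RESOURCE_HCA_OBJECT);
 *      if (ret)
 *              return ret;
 *      ...
 *      ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT);
 */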

static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
                                         struct net_device *upper)
{
        return netdev_has_upper_dev_all_rcu(dev, upper);
}
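
/*
 * Usage sketch (illustrative, not part of this header): the _rcu suffix means
 * the caller must hold the RCU read lock across the check:
 *
 *      rcu_read_lock();
 *      is_upper = rdma_is_upper_dev_rcu(netdev, upper_netdev);
 *      rcu_read_unlock();
 */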

int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

void rdma_nl_init(void);
void rdma_nl_exit(void);

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                              struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
                             struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
                             struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack);

int ib_get_cached_subnet_prefix(struct ib_device *device,
                                u8                port_num,
                                u64              *sn_pfx);

#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_release_port_pkey_list(struct ib_device *device);

void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix);

int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata);

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
void ib_destroy_qp_security_end(struct ib_qp_security *sec);
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_close_shared_qp_security(struct ib_qp_security *sec);
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
void ib_mad_agent_security_change(void);
#else
static inline void ib_security_release_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
                                            u8 port_num,
                                            u64 subnet_prefix)
{
}

static inline int ib_security_modify_qp(struct ib_qp *qp,
                                        struct ib_qp_attr *qp_attr,
                                        int qp_attr_mask,
                                        struct ib_udata *udata)
{
        return qp->device->ops.modify_qp(qp->real_qp,
                                         qp_attr,
                                         qp_attr_mask,
                                         udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
                                        struct ib_device *dev)
{
        return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
                                             struct ib_device *dev)
{
        return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                              enum ib_qp_type qp_type)
{
        return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
                                          u16 pkey_index)
{
        return 0;
}

static inline void ib_mad_agent_security_change(void)
{
}
#endif
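
/*
 * Usage sketch (illustrative, not part of this header): the core QP destroy
 * path brackets the driver's destroy_qp() call with the begin/abort/end
 * helpers so the security state can be rolled back if the driver fails:
 *
 *      ib_destroy_qp_security_begin(sec);
 *      ret = qp->device->ops.destroy_qp(qp, udata);
 *      if (ret)
 *              ib_destroy_qp_security_abort(sec);
 *      else
 *              ib_destroy_qp_security_end(sec);
 */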

struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);

/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);

static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
                                          struct ib_pd *pd,
                                          struct ib_qp_init_attr *attr,
                                          struct ib_udata *udata,
                                          struct ib_uqp_object *uobj)
{
        enum ib_qp_type qp_type = attr->qp_type;
        struct ib_qp *qp;
        bool is_xrc;

        if (!dev->ops.create_qp)
                return ERR_PTR(-EOPNOTSUPP);

        qp = dev->ops.create_qp(pd, attr, udata);
        if (IS_ERR(qp))
                return qp;

        qp->device = dev;
        qp->pd = pd;
        qp->uobject = uobj;
        qp->real_qp = qp;

        qp->qp_type = attr->qp_type;
        qp->rwq_ind_tbl = attr->rwq_ind_tbl;
        qp->send_cq = attr->send_cq;
        qp->recv_cq = attr->recv_cq;
        qp->srq = attr->srq;
        qp->event_handler = attr->event_handler;

        atomic_set(&qp->usecnt, 0);
        spin_lock_init(&qp->mr_lock);
        INIT_LIST_HEAD(&qp->rdma_mrs);
        INIT_LIST_HEAD(&qp->sig_mrs);

        /*
         * We don't track XRC QPs for now, because they don't have a PD
         * and, more importantly, they are created internally by the driver;
         * see mlx5 create_dev_resources() as an example.
         */
        is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
        if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
                qp->res.type = RDMA_RESTRACK_QP;
                if (uobj)
                        rdma_restrack_uadd(&qp->res);
                else
                        rdma_restrack_kadd(&qp->res);
        } else
                qp->res.valid = false;

        return qp;
}
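
/*
 * Usage sketch (illustrative, not part of this header): callers treat
 * _ib_create_qp() like any other ERR_PTR-returning constructor; "init_attr"
 * below is a hypothetical, already-populated ib_qp_init_attr.
 *
 *      struct ib_qp *qp;
 *
 *      qp = _ib_create_qp(device, pd, &init_attr, NULL, NULL);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */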

struct rdma_dev_addr;
int rdma_resolve_ip_route(struct sockaddr *src_addr,
                          const struct sockaddr *dst_addr,
                          struct rdma_dev_addr *addr);

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
                                 const union ib_gid *dgid,
                                 u8 *dmac, const struct ib_gid_attr *sgid_attr,
                                 int *hoplimit);
void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
                           const struct net_device *dev);

struct sa_path_rec;
int roce_resolve_route_from_path(struct sa_path_rec *rec,
                                 const struct ib_gid_attr *attr);

struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);

void ib_free_port_attrs(struct ib_core_device *coredev);
int ib_setup_port_attrs(struct ib_core_device *coredev);

int rdma_compatdev_set(u8 enable);

int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
                                 struct kobject *kobj, struct kobj_type *ktype,
                                 const char *name);
void ib_port_unregister_module_stat(struct kobject *kobj);

int ib_device_set_netns_put(struct sk_buff *skb,
                            struct ib_device *dev, u32 ns_fd);

int rdma_nl_net_init(struct rdma_dev_net *rnet);
void rdma_nl_net_exit(struct rdma_dev_net *rnet);

struct rdma_umap_priv {
        struct vm_area_struct *vma;
        struct list_head list;
        struct rdma_user_mmap_entry *entry;
};

void rdma_umap_priv_init(struct rdma_umap_priv *priv,
                         struct vm_area_struct *vma,
                         struct rdma_user_mmap_entry *entry);
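
/*
 * Usage sketch (illustrative, not part of this header): the mmap tracking
 * path allocates a priv node and ties it to the vma via rdma_umap_priv_init():
 *
 *      struct rdma_umap_priv *priv;
 *
 *      priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *      if (!priv)
 *              return -ENOMEM;
 *      rdma_umap_priv_init(priv, vma, entry);
 */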

void ib_cq_pool_init(struct ib_device *dev);
void ib_cq_pool_destroy(struct ib_device *dev);

#endif /* _CORE_PRIV_H */