1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef _CORE_PRIV_H
34#define _CORE_PRIV_H
35
36#include <linux/list.h>
37#include <linux/spinlock.h>
38#include <linux/cgroup_rdma.h>
39
40#include <rdma/ib_verbs.h>
41#include <rdma/opa_addr.h>
42#include <rdma/ib_mad.h>
43#include <rdma/restrack.h>
44#include "mad_priv.h"
45
46
47#define RDMA_MAX_PORTS 8192
48
/*
 * Associates one P_Key table index with the list of QPs currently using
 * that index.  NOTE(review): consumers appear to be the ib_security_*
 * paths declared below — confirm against core/security.c.
 */
struct pkey_index_qp_list {
	/* Entry in the owning port's list of pkey_index_qp_list nodes. */
	struct list_head pkey_index_list;
	/* P_Key table index this node represents. */
	u16 pkey_index;

	/* Protects qp_list below. */
	spinlock_t qp_list_lock;
	/* QPs referencing this pkey index. */
	struct list_head qp_list;
};
56
57int ib_device_register_sysfs(struct ib_device *device,
58 int (*port_callback)(struct ib_device *,
59 u8, struct kobject *));
60void ib_device_unregister_sysfs(struct ib_device *device);
61int ib_device_rename(struct ib_device *ibdev, const char *name);
62
63typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
64 struct net_device *idev, void *cookie);
65
66typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
67 struct net_device *idev, void *cookie);
68
69void ib_enum_roce_netdev(struct ib_device *ib_dev,
70 roce_netdev_filter filter,
71 void *filter_cookie,
72 roce_netdev_callback cb,
73 void *cookie);
74void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
75 void *filter_cookie,
76 roce_netdev_callback cb,
77 void *cookie);
78
79typedef int (*nldev_callback)(struct ib_device *device,
80 struct sk_buff *skb,
81 struct netlink_callback *cb,
82 unsigned int idx);
83
84int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
85 struct netlink_callback *cb);
86
/* Requested action for ib_cache_gid_set_default_gid(). */
enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,		/* install the default GID entry */
	IB_CACHE_GID_DEFAULT_MODE_DELETE	/* remove the default GID entry */
};
91
92int ib_cache_gid_parse_type_str(const char *buf);
93
94const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);
95
96void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
97 struct net_device *ndev,
98 unsigned long gid_type_mask,
99 enum ib_cache_gid_default_mode mode);
100
101int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
102 union ib_gid *gid, struct ib_gid_attr *attr);
103
104int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
105 union ib_gid *gid, struct ib_gid_attr *attr);
106
107int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
108 struct net_device *ndev);
109
110int roce_gid_mgmt_init(void);
111void roce_gid_mgmt_cleanup(void);
112
113unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
114
115int ib_cache_setup_one(struct ib_device *device);
116void ib_cache_cleanup_one(struct ib_device *device);
117void ib_cache_release_one(struct ib_device *device);
118
119#ifdef CONFIG_CGROUP_RDMA
120int ib_device_register_rdmacg(struct ib_device *device);
121void ib_device_unregister_rdmacg(struct ib_device *device);
122
123int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
124 struct ib_device *device,
125 enum rdmacg_resource_type resource_index);
126
127void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
128 struct ib_device *device,
129 enum rdmacg_resource_type resource_index);
130#else
/*
 * CONFIG_CGROUP_RDMA disabled: no-op stubs.  Registration and charging
 * always succeed so callers need no #ifdefs of their own.
 */
static inline int ib_device_register_rdmacg(struct ib_device *device)
{ return 0; }

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{ }

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
				       struct ib_device *device,
				       enum rdmacg_resource_type resource_index)
{ return 0; }

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
				      struct ib_device *device,
				      enum rdmacg_resource_type resource_index)
{ }
146#endif
147
/*
 * Return true if @upper is an upper device of @dev (searching the whole
 * upper-device chain, per the _all_rcu netdev helper).  Caller must hold
 * the RCU read lock, as required by netdev_has_upper_dev_all_rcu().
 */
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}
153
154int addr_init(void);
155void addr_cleanup(void);
156
157int ib_mad_init(void);
158void ib_mad_cleanup(void);
159
160int ib_sa_init(void);
161void ib_sa_cleanup(void);
162
163int rdma_nl_init(void);
164void rdma_nl_exit(void);
165
166int ib_nl_handle_resolve_resp(struct sk_buff *skb,
167 struct nlmsghdr *nlh,
168 struct netlink_ext_ack *extack);
169int ib_nl_handle_set_timeout(struct sk_buff *skb,
170 struct nlmsghdr *nlh,
171 struct netlink_ext_ack *extack);
172int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
173 struct nlmsghdr *nlh,
174 struct netlink_ext_ack *extack);
175
176int ib_get_cached_subnet_prefix(struct ib_device *device,
177 u8 port_num,
178 u64 *sn_pfx);
179
180#ifdef CONFIG_SECURITY_INFINIBAND
181void ib_security_release_port_pkey_list(struct ib_device *device);
182
183void ib_security_cache_change(struct ib_device *device,
184 u8 port_num,
185 u64 subnet_prefix);
186
187int ib_security_modify_qp(struct ib_qp *qp,
188 struct ib_qp_attr *qp_attr,
189 int qp_attr_mask,
190 struct ib_udata *udata);
191
192int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
193void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
194void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
195void ib_destroy_qp_security_end(struct ib_qp_security *sec);
196int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
197void ib_close_shared_qp_security(struct ib_qp_security *sec);
198int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
199 enum ib_qp_type qp_type);
200void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
201int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
202void ib_mad_agent_security_change(void);
203#else
/*
 * CONFIG_SECURITY_INFINIBAND disabled: stub out the security hooks.
 * All checks trivially succeed and all notification/teardown hooks are
 * no-ops, so callers need no #ifdefs of their own.
 */
static inline void ib_security_release_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
					    u8 port_num,
					    u64 subnet_prefix)
{
}

/*
 * With security disabled there is nothing to check, so invoke the
 * driver's modify_qp op directly.  The op is called on qp->real_qp
 * rather than qp itself — NOTE(review): presumably to resolve shared-QP
 * handles to the underlying QP; confirm against ib_verbs.h.
 */
static inline int ib_security_modify_qp(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask,
					struct ib_udata *udata)
{
	return qp->device->ops.modify_qp(qp->real_qp,
					 qp_attr,
					 qp_attr_mask,
					 udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
					struct ib_device *dev)
{
	return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
					     struct ib_device *dev)
{
	return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
					      enum ib_qp_type qp_type)
{
	return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
					  u16 pkey_index)
{
	return 0;
}

static inline void ib_mad_agent_security_change(void)
{
}
272#endif
273
274struct ib_device *ib_device_get_by_index(u32 ifindex);
275
276void nldev_init(void);
277void nldev_exit(void);
278
279static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
280 struct ib_pd *pd,
281 struct ib_qp_init_attr *attr,
282 struct ib_udata *udata,
283 struct ib_uobject *uobj)
284{
285 struct ib_qp *qp;
286
287 if (!dev->ops.create_qp)
288 return ERR_PTR(-EOPNOTSUPP);
289
290 qp = dev->ops.create_qp(pd, attr, udata);
291 if (IS_ERR(qp))
292 return qp;
293
294 qp->device = dev;
295 qp->pd = pd;
296 qp->uobject = uobj;
297
298
299
300
301
302 if (attr->qp_type < IB_QPT_XRC_INI) {
303 qp->res.type = RDMA_RESTRACK_QP;
304 if (uobj)
305 rdma_restrack_uadd(&qp->res);
306 else
307 rdma_restrack_kadd(&qp->res);
308 } else
309 qp->res.valid = false;
310
311 return qp;
312}
313
314struct rdma_dev_addr;
315int rdma_resolve_ip_route(struct sockaddr *src_addr,
316 const struct sockaddr *dst_addr,
317 struct rdma_dev_addr *addr);
318
319int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
320 const union ib_gid *dgid,
321 u8 *dmac, const struct ib_gid_attr *sgid_attr,
322 int *hoplimit);
323void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
324 const struct net_device *dev);
325
326struct sa_path_rec;
327int roce_resolve_route_from_path(struct sa_path_rec *rec,
328 const struct ib_gid_attr *attr);
329
330struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
331#endif
332