#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"

static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
				       .len = IB_DEVICE_NAME_MAX - 1 },
	[RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
					 .len = IB_FW_VERSION_NAME_MAX - 1 },
	[RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
						     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
					    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
};

static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;

	return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);

	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}

static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      (u64)attr.port_cap_flags,
				      RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	if (device->get_netdev)
		netdev = device->get_netdev(device, port);

	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
	};

	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
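	/*
	 * Report the kernel name for kernel-owned resources, and the PID as
	 * seen in the current PID namespace for user-owned ones.
	 */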
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}

static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct rdma_restrack_root *resroot = &qp->device->res;
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

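	/* Skip the port attribute while the QP has not been bound to a port */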
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_restrack_root *resroot = &id_priv->id.device->res;
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct rdma_restrack_root *resroot = &cq->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

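	/* Poll context is only valid for kernel CQs */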
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct rdma_restrack_root *resroot = &mr->pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct rdma_restrack_root *resroot = &pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
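	/*
	 * No locking is needed here; ib_enum_all_devs() walks the device
	 * list under ib_core's own lock.
	 */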
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
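		/*
		 * Resume from the index saved by the previous dump pass in
		 * cb->args[0]; it is zero on the first call, so by default
		 * all ports are returned.
		 */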
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	put_device(&device->dev);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};

static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
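	/*
	 * A device index is required for now. This could be extended to walk
	 * all devices when RDMA_NLDEV_ATTR_DEV_INDEX is absent, but that is
	 * not needed yet.
	 */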
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

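	/*
	 * If no PORT_INDEX is supplied, resources from all ports are
	 * returned.
	 */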
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
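			/*
			 * Kernel-owned resources are shown only in the init
			 * PID namespace; user-owned resources only to callers
			 * in the owner's PID namespace.
			 */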
			goto next;

		if (!rdma_restrack_get(res))
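			/* The resource is being released, skip it. */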
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
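		/*
		 * Drop our reference; the final release is deferred until
		 * device->res.rwsem can be taken for write.
		 */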
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
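			/*
			 * The message is full; stop here and let the next
			 * dump pass resume from cb->args[0].
			 */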
			break;
		if (ret)
			goto res_err;
next:
		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

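	/*
	 * No entries were filled: cancel the message and return 0 to mark
	 * the end of the dump.
	 */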
	if (!filled)
		goto err;

	put_device(&device->dev);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	put_device(&device->dev);
	return ret;
}

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}

static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
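		/*
		 * .doit is not implemented for the resource queries yet;
		 * only dump requests are supported.
		 */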
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);