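/*
 * Dump iw_cxgb4 driver-private state (QPs, CM endpoints, CQs, and MRs)
 * through the RDMA netlink restrack interface.
 */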
#include <rdma/rdma_cm.h>

#include "iw_cxgb4.h"
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>

static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
{
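	/* dump the software state of the send queue */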
	if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
{
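	/* dump the software state of the receive queue */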
	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
		      struct t4_swsqe *sqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
		goto err;
	if (sqe->complete &&
	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
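
/*
 * Dump the first and the last pending software send WRs, if either
 * exists.
 */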
static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
		       u16 first_idx, struct t4_swsqe *first_sqe,
		       u16 last_idx, struct t4_swsqe *last_sqe)
{
	if (!first_sqe)
		goto out;
	if (fill_swsqe(msg, sq, first_idx, first_sqe))
		goto err;
	if (!last_sqe)
		goto out;
	if (fill_swsqe(msg, sq, last_idx, last_sqe))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_res_qp_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
	struct t4_swsqe *fsp = NULL, *lsp = NULL;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	u16 first_sq_idx = 0, last_sq_idx = 0;
	struct t4_swsqe first_sqe, last_sqe;
	struct nlattr *table_attr;
	struct t4_wq wq;

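	/* user-mode QP state is kept in userspace, so there is nothing to dump */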
	if (qhp->ucontext)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

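	/* lock out the QP and take a consistent snapshot of the work queue */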
	spin_lock_irq(&qhp->lock);
	wq = qhp->wq;

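	/* if send WRs are pending, also snapshot the first and the last */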
	if (wq.sq.cidx != wq.sq.pidx) {
		first_sq_idx = wq.sq.cidx;
		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
		fsp = &first_sqe;
		last_sq_idx = wq.sq.pidx;
		if (last_sq_idx-- == 0)
			last_sq_idx = wq.sq.size - 1;
		if (last_sq_idx != first_sq_idx) {
			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
			lsp = &last_sqe;
		}
	}
	spin_unlock_irq(&qhp->lock);

	if (fill_sq(msg, &wq))
		goto err_cancel_table;

	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
		goto err_cancel_table;

	if (fill_rq(msg, &wq))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
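
/*
 * Scratch buffer big enough to hold a private copy of either endpoint
 * type, so the endpoint can be dumped after its mutex is dropped.
 */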
union union_ep {
	struct c4iw_listen_ep lep;
	struct c4iw_ep ep;
};

static int fill_res_ep_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct rdma_cm_id *cm_id = rdma_res_to_id(res);
	struct nlattr *table_attr;
	struct c4iw_ep_common *epcp;
	struct c4iw_listen_ep *listen_ep = NULL;
	struct c4iw_ep *ep = NULL;
	struct iw_cm_id *iw_cm_id;
	union union_ep *uep;

	iw_cm_id = rdma_iw_cm_id(cm_id);
	if (!iw_cm_id)
		return 0;
	epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
	if (!epcp)
		return 0;
	uep = kzalloc(sizeof(*uep), GFP_KERNEL);
	if (!uep)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err_free_uep;

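	/* snapshot the endpoint under its mutex, then drop the lock */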
	mutex_lock(&epcp->mutex);
	if (epcp->state == LISTEN) {
		uep->lep = *(struct c4iw_listen_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		listen_ep = &uep->lep;
		epcp = &listen_ep->com;
	} else {
		uep->ep = *(struct c4iw_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		ep = &uep->ep;
		epcp = &ep->com;
	}

	if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
		goto err_cancel_table;

	if (epcp->state == LISTEN) {
		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
			goto err_cancel_table;
	} else {
		if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
			goto err_cancel_table;

		if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
							     ep->atid))
			goto err_cancel_table;
	}
	nla_nest_end(msg, table_attr);
	kfree(uep);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err_free_uep:
	kfree(uep);
	return -EMSGSIZE;
}
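
/* dump the software state of a t4_cq */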
static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
{
	if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", cq->size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "error", cq->error))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cq->bits_type_ts)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
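
/* dump one CQE, keyed by the given queue-index attribute name */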
static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
		    const char *qstr)
{
	if (rdma_nl_put_driver_u32(msg, qstr, idx))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "header",
				       be32_to_cpu(cqe->header)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
				       be32_to_cpu(cqe->u.gen.wrid_hi)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
				       be32_to_cpu(cqe->u.gen.wrid_low)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cqe->bits_type_ts)))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
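
/* dump the two hardware CQEs snapshotted at cidx - 1 and cidx */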
static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
		goto err;
	idx = cq->cidx;
	if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
		goto err;

	return 0;
err:
	return -EMSGSIZE;
}
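
/* dump the first and the last software CQEs, if any are in use */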
static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	if (!cq->sw_in_use)
		return 0;

	idx = cq->sw_cidx;
	if (fill_cqe(msg, cqes, idx, "swcq_idx"))
		goto err;
	if (cq->sw_in_use == 1)
		goto out;
	idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_cq *ibcq = container_of(res, struct ib_cq, res);
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct nlattr *table_attr;
	struct t4_cqe hwcqes[2];
	struct t4_cqe swcqes[2];
	struct t4_cq cq;
	u16 idx;

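	/* user-mode CQ state is kept in userspace, so there is nothing to dump */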
	if (ibcq->uobject)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

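	/* lock out the CQ and take a consistent snapshot of its state */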
	spin_lock_irq(&chp->lock);
	cq = chp->cq;

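	/* snapshot the two hardware CQEs at cidx - 1 and cidx */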
	idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
	hwcqes[0] = chp->cq.queue[idx];

	idx = cq.cidx;
	hwcqes[1] = chp->cq.queue[idx];

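	/* snapshot the first and the last software CQEs, if any */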
	if (cq.sw_in_use) {
		swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
		if (cq.sw_in_use > 1) {
			idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
			swcqes[1] = chp->cq.sw_queue[idx];
		}
	}

	spin_unlock_irq(&chp->lock);

	if (fill_cq(msg, &cq))
		goto err_cancel_table;

	if (fill_swcqes(msg, &cq, swcqes))
		goto err_cancel_table;

	if (fill_hwcqes(msg, &cq, hwcqes))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

static int fill_res_mr_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
	struct c4iw_dev *dev = mhp->rhp;
	u32 stag = mhp->attr.stag;
	struct nlattr *table_attr;
	struct fw_ri_tpte tpte;
	int ret;

	if (!stag)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		/* don't fail the dump; close the nest and skip this MR */
		nla_nest_cancel(msg, table_attr);
		return 0;
	}

	if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "valid",
				   FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "state",
				   FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "pdid",
				   FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "perm",
				       FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "ps",
				   FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64(msg, "len",
				   ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
				       FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
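
/* dispatch table, indexed by restrack resource type */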
c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP]	= fill_res_qp_entry,
	[RDMA_RESTRACK_CM_ID]	= fill_res_ep_entry,
	[RDMA_RESTRACK_CQ]	= fill_res_cq_entry,
	[RDMA_RESTRACK_MR]	= fill_res_mr_entry,
};