1
2
3
4
5
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM rpcrdma
9
10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_RPCRDMA_H
12
13#include <linux/scatterlist.h>
14#include <linux/tracepoint.h>
15#include <trace/events/rdma.h>
16
17
18
19
20
/*
 * Event class for RPC Reply-handling failure events: captures the rep,
 * its owning transport, and the decoded (CPU-endian) XID/version/proc
 * from the received RPC-over-RDMA header.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		/* header fields arrive in XDR big-endian; store CPU order */
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

/* Instantiate one tracepoint of the xprtrdma_reply_event class. */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_event, name,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
56
/*
 * Event class for transport-lifecycle events: records the transport
 * pointer plus its printable peer address and port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		/* copy the strings now; r_xprt may be gone when printed */
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate one tracepoint of the xprtrdma_rxprt class. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
87
/*
 * Event class for Read-chunk marshaling: one event per MR registered
 * for a Read chunk. @pos is the XDR position of the chunk; "more"/"last"
 * indicates whether further segments remain (nents < nsegs).
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate an xprtrdma_chunk_<name> tracepoint of the rdch class. */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
137
/*
 * Event class for Write-list and Reply-chunk marshaling: like the Read
 * chunk class, but without an XDR position field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate an xprtrdma_chunk_<name> tracepoint of the wrch class. */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
183
/*
 * Export the FRWR state enum values to userspace so that
 * __print_symbolic() below resolves them in trace output.
 */
TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);

/* Map an FRWR state value to a human-readable label. */
#define xprtrdma_show_frwr_state(x)					\
		__print_symbolic(x,					\
				{ FRWR_IS_INVALID, "INVALID" },		\
				{ FRWR_IS_VALID, "VALID" },		\
				{ FRWR_FLUSHED_FR, "FLUSHED_FR" },	\
				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })
195
/*
 * Event class for FRWR completion handlers: records the MR owning the
 * frwr, its registration state, and the work-completion status.
 */
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, state)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		/* frwr is embedded in rpcrdma_mr; recover the container */
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->state = frwr->fr_state;
		__entry->status = wc->status;
		/* vendor_err is only meaningful on failure */
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p state=%s: %s (%u/0x%x)",
		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one tracepoint of the xprtrdma_frwr_done class. */
#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))
233
/*
 * Export the DMA data-direction enum values so that
 * __print_symbolic() below resolves them in trace output.
 */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Map a DMA data direction to a human-readable label. */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
245
/*
 * Event class for MR state-change events: records the MR's handle
 * (rkey), length, offset, and DMA mapping direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate an xprtrdma_mr_<name> tracepoint of the MR class. */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name,		\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
282
/*
 * Event class for reverse-direction (backchannel) call events: records
 * the rqst, its rpcrdma_req/rep, and the CPU-endian XID.
 */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

/* Instantiate one tracepoint of the xprtrdma_cb_event class. */
#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))
315
316
317
318
319
/*
 * Fires in the RDMA connection manager event handler: records the CM
 * event type (printed symbolically) and its status for a transport.
 */
TRACE_EVENT(xprtrdma_cm_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);
350
/*
 * Fires when a transport disconnects: records the completion status
 * and whether the endpoint still reports itself connected.
 */
TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		/* rep_connected == 1 means the link is still up */
		__entry->connected == 1 ? "still " : "dis"
	)
);

/* Transport lifecycle tracepoints (all use the xprtrdma_rxprt class). */
DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
392
/*
 * Fires on an asynchronous queue-pair event: records the IB event type
 * (printed symbolically) and the device name it occurred on.
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);
423
424
425
426
427
/*
 * Fires after a batch of MRs has been allocated for a transport,
 * recording how many were created.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
	),

	TP_printk("r_xprt=%p: created %u MRs",
		__entry->r_xprt, __entry->count
	)
);

/* Fires when a transport has run out of registered MRs. */
DEFINE_RXPRT_EVENT(xprtrdma_nomrs);

/* Chunk-encoding tracepoints: Read list, Write list, Reply chunk. */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
456
/*
 * Export the chunk-type enum values so that __print_symbolic()
 * below resolves them in trace output.
 */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Map an rpcrdma chunk type to a human-readable label. */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
470
/*
 * Fires after an RPC Call has been marshaled: records the transport
 * header length, the send buffer's head/page/tail lengths, and the
 * chosen Read and Write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned int hdrlen,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(rqst, hdrlen, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = hdrlen;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
513
/*
 * Fires when marshaling an RPC Call fails, recording the error code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
540
/*
 * Fires when a Send WR is posted: records the SGE count, whether the
 * Send was signaled, and the ib_post_send() return status.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		/* req embeds its rpc_rqst slot */
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->num_sge,
		(__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled " : ""),
		__entry->status
	)
);
578
/*
 * Fires when a single Receive WR is posted; the cqe identifies the
 * Receive so it can be matched with its later completion.
 */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),

	TP_ARGS(cqe),

	TP_STRUCT__entry(
		__field(const void *, cqe)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
	),

	TP_printk("cqe=%p",
		__entry->cqe
	)
);
598
/*
 * Fires after a batch of Receives is posted: records how many new
 * Receives were added, the resulting active count, and the post status.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep.rep_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
631
632
633
634
635
/*
 * Fires on Send completion: records the owning req, the number of
 * pages that were DMA-unmapped, and the completion status.
 */
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		/* vendor_err is only meaningful on failure */
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
664
/*
 * Fires on Receive completion: records the received byte count on
 * success, or the vendor error code on failure.
 */
TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		/* byte_len is undefined when the WC reports an error */
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* FRWR completion tracepoints: FastReg, LocalInv, and LocalInv-wake. */
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
701
/*
 * Fires when allocating FRWR resources for an MR, recording the
 * allocation result code.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->rc = rc;
	),

	TP_printk("mr=%p: rc=%d",
		__entry->mr, __entry->rc
	)
);
724
/*
 * Fires when an FRWR MR is deregistered: records the MR's sge
 * coordinates, DMA direction, and the deregistration result code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
		__entry->rc = rc;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
758
/*
 * Fires when sg_alloc_table or DMA mapping of an MR's scatterlist
 * fails, recording the first segment's DMA address and the sg count.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
787
/*
 * Fires when ib_map_mr_sg() maps fewer segments than requested:
 * records how many were mapped versus how many the MR holds.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

/* MR lifecycle tracepoints (all use the xprtrdma_mr class). */
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(remoteinv);
DEFINE_MR_EVENT(recycle);
824
825TRACE_EVENT(xprtrdma_dma_maperr,
826 TP_PROTO(
827 u64 addr
828 ),
829
830 TP_ARGS(addr),
831
832 TP_STRUCT__entry(
833 __field(u64, addr)
834 ),
835
836 TP_fast_assign(
837 __entry->addr = addr;
838 ),
839
840 TP_printk("dma addr=0x%llx\n", __entry->addr)
841);
842
843
844
845
846
/*
 * Fires when a received Reply is matched to its Call: records the rep,
 * the matching req, and the credit grant carried in the Reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);
880
/*
 * Fires when Reply processing is deferred to the workqueue because
 * the Reply needs explicit invalidation or pull-up.
 */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		/* rr_rqst must already be set when this fires */
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/* Reply-sanity-check failure tracepoints (xprtrdma_reply_event class). */
DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
912
/*
 * Fires when inline Reply data is copied into the receive buffer,
 * recording the copy length and the transport header length.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);
943
/*
 * Fires per page while copying inline Reply data into the receive
 * buffer's page list: records the page number, source position, and
 * remaining/current copy lengths.
 */
TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);
978
/*
 * Fires for each RDMA segment decoded from a Reply's chunk lists,
 * recording the segment's handle (rkey), length, and offset.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1005
1006
1007
1008
1009
/*
 * Fires in the transport's buf_alloc method: records the req chosen
 * for the task and the requested call/receive buffer sizes.
 */
TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);
1039
/*
 * Fires in the transport's buf_free method: records the req being
 * released and any Reply still attached to it.
 */
TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);
1067
1068
1069
1070
1071
/*
 * Fires when backchannel resources are set up for a transport,
 * recording how many backchannel requests were provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

/* Backchannel call/reply tracepoints (xprtrdma_cb_event class). */
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

/*
 * Fires when a Reply arrives for an rqst that has already completed,
 * leaking the rep buffer (it can no longer be matched and recycled).
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1130
1131
1132
1133
1134
/*
 * Server-side event class for svc_xprt lifecycle events: records the
 * xprt pointer and the client's printable address.
 */
DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),

	TP_ARGS(xprt),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);

/* Instantiate a svcrdma_xprt_<name> tracepoint of the class above. */
#define DEFINE_XPRT_EVENT(name)						\
		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
				TP_PROTO(				\
					const struct svc_xprt *xprt	\
				),					\
				TP_ARGS(xprt))

/* Server transport lifecycle tracepoints. */
DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);
1167
/*
 * Export the RPC-over-RDMA procedure values so that
 * __print_symbolic() below resolves them in trace output.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC-over-RDMA procedure number to its protocol name. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1181
/*
 * Fires after the server decodes an incoming transport header; @p
 * points at the header's four fixed fields (xid, vers, credits, proc).
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		/* walk the header: xid, vers, credits, then proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1210
/*
 * Fires when an incoming message is too short to contain a complete
 * transport header; only the received length can be reported.
 */
TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);
1228
/*
 * Event class for malformed incoming requests: decodes the four fixed
 * transport header fields from @p for the trace record.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		/* walk the header: xid, vers, credits, then proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Instantiate a svcrdma_decode_<name> tracepoint of the class above. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
				TP_PROTO(				\
					__be32 *p			\
				),					\
				TP_ARGS(p))

/* Malformed-request tracepoints. */
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1265
/*
 * Server-side event class for encoding one RDMA segment: records the
 * segment's handle (rkey), length, and offset.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/* Instantiate a svcrdma_encode_<name> tracepoint of the class above. */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

/* Read-segment and Write-segment encoding tracepoints. */
DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);
1304
/*
 * Server-side event class for encoding a whole chunk: records only
 * the chunk's total payload length.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

/* Instantiate a svcrdma_encode_<name> tracepoint of the class above. */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

/* Position-zero Read, Write-list, and Reply-chunk encoding events. */
DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);
1335
/*
 * Fires when the server encodes a Read chunk: records the chunk's
 * length and its XDR position in the call message.
 */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1358
/*
 * Server-side event class for sending an RDMA_ERROR reply: records
 * the XID of the failed request (converted from wire order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Instantiate a svcrdma_err_<name> tracepoint of the class above. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

/* ERR_VERS and ERR_CHUNK error-reply tracepoints. */
DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1388
1389
1390
1391
1392
1393TRACE_EVENT(svcrdma_dma_map_page,
1394 TP_PROTO(
1395 const struct svcxprt_rdma *rdma,
1396 const void *page
1397 ),
1398
1399 TP_ARGS(rdma, page),
1400
1401 TP_STRUCT__entry(
1402 __field(const void *, page);
1403 __string(device, rdma->sc_cm_id->device->name)
1404 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1405 ),
1406
1407 TP_fast_assign(
1408 __entry->page = page;
1409 __assign_str(device, rdma->sc_cm_id->device->name);
1410 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1411 ),
1412
1413 TP_printk("addr=%s device=%s page=%p",
1414 __get_str(addr), __get_str(device), __entry->page
1415 )
1416);
1417
/*
 * Records the status of a DMA mapping operation for an R/W context on
 * the given server-side transport, along with the device name and the
 * peer's address string.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
 TP_PROTO(
 const struct svcxprt_rdma *rdma,
 int status
 ),

 TP_ARGS(rdma, status),

 TP_STRUCT__entry(
 __field(int, status)
 __string(device, rdma->sc_cm_id->device->name)
 __string(addr, rdma->sc_xprt.xpt_remotebuf)
 ),

 TP_fast_assign(
 __entry->status = status;
 __assign_str(device, rdma->sc_cm_id->device->name);
 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
 ),

 TP_printk("addr=%s device=%s status=%d",
 __get_str(addr), __get_str(device), __entry->status
 )
);
1442
1443TRACE_EVENT(svcrdma_send_failed,
1444 TP_PROTO(
1445 const struct svc_rqst *rqst,
1446 int status
1447 ),
1448
1449 TP_ARGS(rqst, status),
1450
1451 TP_STRUCT__entry(
1452 __field(int, status)
1453 __field(u32, xid)
1454 __field(const void *, xprt)
1455 __string(addr, rqst->rq_xprt->xpt_remotebuf)
1456 ),
1457
1458 TP_fast_assign(
1459 __entry->status = status;
1460 __entry->xid = __be32_to_cpu(rqst->rq_xid);
1461 __entry->xprt = rqst->rq_xprt;
1462 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1463 ),
1464
1465 TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1466 __entry->xprt, __get_str(addr),
1467 __entry->xid, __entry->status
1468 )
1469);
1470
/*
 * Event class for Send-side completions: records the completion's
 * cqe pointer and status.  The vendor error code is captured only
 * when the completion failed; it is zeroed on success so flushed
 * entries never carry stale hardware state.
 * Instantiated via DEFINE_SENDCOMP_EVENT().
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
 TP_PROTO(
 const struct ib_wc *wc
 ),

 TP_ARGS(wc),

 TP_STRUCT__entry(
 __field(const void *, cqe)
 __field(unsigned int, status)
 __field(unsigned int, vendor_err)
 ),

 TP_fast_assign(
 __entry->cqe = wc->wr_cqe;
 __entry->status = wc->status;
 if (wc->status)
 __entry->vendor_err = wc->vendor_err;
 else
 __entry->vendor_err = 0;
 ),

 TP_printk("cqe=%p status=%s (%u/0x%x)",
 __entry->cqe, rdma_show_wc_status(__entry->status),
 __entry->status, __entry->vendor_err
 )
);
1498
/*
 * Instantiate a tracepoint named svcrdma_wc_<name> from the
 * svcrdma_sendcomp_event class (work-completion record).
 */
#define DEFINE_SENDCOMP_EVENT(name) \
 DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
 TP_PROTO( \
 const struct ib_wc *wc \
 ), \
 TP_ARGS(wc))
1505
/*
 * Records the posting of a Send WR: its cqe, SGE count, the rkey being
 * remotely invalidated (only meaningful for IB_WR_SEND_WITH_INV; zero
 * otherwise), and the return status of the post operation.
 */
TRACE_EVENT(svcrdma_post_send,
 TP_PROTO(
 const struct ib_send_wr *wr,
 int status
 ),

 TP_ARGS(wr, status),

 TP_STRUCT__entry(
 __field(const void *, cqe)
 __field(unsigned int, num_sge)
 __field(u32, inv_rkey)
 __field(int, status)
 ),

 TP_fast_assign(
 __entry->cqe = wr->wr_cqe;
 __entry->num_sge = wr->num_sge;
 /* ex.invalidate_rkey is valid only for Send-with-Invalidate */
 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
 wr->ex.invalidate_rkey : 0;
 __entry->status = status;
 ),

 TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
 __entry->cqe, __entry->num_sge,
 __entry->inv_rkey, __entry->status
 )
);

/* Completion of a Send WR */
DEFINE_SENDCOMP_EVENT(send);
1536
/*
 * Records the posting of a Receive WR: its cqe and the return status
 * of the post operation.
 */
TRACE_EVENT(svcrdma_post_recv,
 TP_PROTO(
 const struct ib_recv_wr *wr,
 int status
 ),

 TP_ARGS(wr, status),

 TP_STRUCT__entry(
 __field(const void *, cqe)
 __field(int, status)
 ),

 TP_fast_assign(
 __entry->cqe = wr->wr_cqe;
 __entry->status = status;
 ),

 TP_printk("cqe=%p status=%d",
 __entry->cqe, __entry->status
 )
);
1559
/*
 * Records a Receive completion.  byte_len is valid only on success;
 * vendor_err is valid only on failure -- the unused field is zeroed
 * so flushed completions never report stale lengths.
 */
TRACE_EVENT(svcrdma_wc_receive,
 TP_PROTO(
 const struct ib_wc *wc
 ),

 TP_ARGS(wc),

 TP_STRUCT__entry(
 __field(const void *, cqe)
 __field(u32, byte_len)
 __field(unsigned int, status)
 __field(u32, vendor_err)
 ),

 TP_fast_assign(
 __entry->cqe = wc->wr_cqe;
 __entry->status = wc->status;
 if (wc->status) {
 __entry->byte_len = 0;
 __entry->vendor_err = wc->vendor_err;
 } else {
 __entry->byte_len = wc->byte_len;
 __entry->vendor_err = 0;
 }
 ),

 TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
 __entry->cqe, __entry->byte_len,
 rdma_show_wc_status(__entry->status),
 __entry->status, __entry->vendor_err
 )
);
1592
/*
 * Records the posting of an RDMA Read/Write chain: the chain's cqe,
 * the number of SQ entries it consumes, and the return status of the
 * post operation.
 */
TRACE_EVENT(svcrdma_post_rw,
 TP_PROTO(
 const void *cqe,
 int sqecount,
 int status
 ),

 TP_ARGS(cqe, sqecount, status),

 TP_STRUCT__entry(
 __field(const void *, cqe)
 __field(int, sqecount)
 __field(int, status)
 ),

 TP_fast_assign(
 __entry->cqe = cqe;
 __entry->sqecount = sqecount;
 __entry->status = status;
 ),

 TP_printk("cqe=%p sqecount=%d status=%d",
 __entry->cqe, __entry->sqecount, __entry->status
 )
);

/* Completions of RDMA Read and RDMA Write WRs */
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
1621
1622TRACE_EVENT(svcrdma_cm_event,
1623 TP_PROTO(
1624 const struct rdma_cm_event *event,
1625 const struct sockaddr *sap
1626 ),
1627
1628 TP_ARGS(event, sap),
1629
1630 TP_STRUCT__entry(
1631 __field(unsigned int, event)
1632 __field(int, status)
1633 __array(__u8, addr, INET6_ADDRSTRLEN + 10)
1634 ),
1635
1636 TP_fast_assign(
1637 __entry->event = event->event;
1638 __entry->status = event->status;
1639 snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1640 "%pISpc", sap);
1641 ),
1642
1643 TP_printk("addr=%s event=%s (%u/%d)",
1644 __entry->addr,
1645 rdma_show_cm_event(__entry->event),
1646 __entry->event, __entry->status
1647 )
1648);
1649
1650TRACE_EVENT(svcrdma_qp_error,
1651 TP_PROTO(
1652 const struct ib_event *event,
1653 const struct sockaddr *sap
1654 ),
1655
1656 TP_ARGS(event, sap),
1657
1658 TP_STRUCT__entry(
1659 __field(unsigned int, event)
1660 __string(device, event->device->name)
1661 __array(__u8, addr, INET6_ADDRSTRLEN + 10)
1662 ),
1663
1664 TP_fast_assign(
1665 __entry->event = event->event;
1666 __assign_str(device, event->device->name);
1667 snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1668 "%pISpc", sap);
1669 ),
1670
1671 TP_printk("addr=%s dev=%s event=%s (%u)",
1672 __entry->addr, __get_str(device),
1673 rdma_show_ib_event(__entry->event), __entry->event
1674 )
1675);
1676
/*
 * Event class for Send Queue accounting: snapshots the current
 * available SQ credits (atomic read of sc_sq_avail) against the
 * queue depth, plus the peer's address string.
 * Instantiated via DEFINE_SQ_EVENT().
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
 TP_PROTO(
 const struct svcxprt_rdma *rdma
 ),

 TP_ARGS(rdma),

 TP_STRUCT__entry(
 __field(int, avail)
 __field(int, depth)
 __string(addr, rdma->sc_xprt.xpt_remotebuf)
 ),

 TP_fast_assign(
 __entry->avail = atomic_read(&rdma->sc_sq_avail);
 __entry->depth = rdma->sc_sq_depth;
 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
 ),

 TP_printk("addr=%s sc_sq_avail=%d/%d",
 __get_str(addr), __entry->avail, __entry->depth
 )
);
1700
/*
 * Instantiate a tracepoint named svcrdma_sq_<name> from the
 * svcrdma_sendqueue_event class (SQ credit snapshot).
 */
#define DEFINE_SQ_EVENT(name) \
 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
 TP_PROTO( \
 const struct svcxprt_rdma *rdma \
 ), \
 TP_ARGS(rdma))

/* NOTE(review): presumably fired when the SQ is full and when a
 * post is retried -- confirm at the call sites.
 */
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1710
1711#endif
1712
1713#include <trace/define_trace.h>
1714