1
2
3
4
5
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM rpcrdma
9
10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_RPCRDMA_H
12
13#include <linux/tracepoint.h>
14#include <trace/events/rdma.h>
15
16
17
18
19
20DECLARE_EVENT_CLASS(xprtrdma_reply_event,
21 TP_PROTO(
22 const struct rpcrdma_rep *rep
23 ),
24
25 TP_ARGS(rep),
26
27 TP_STRUCT__entry(
28 __field(const void *, rep)
29 __field(const void *, r_xprt)
30 __field(u32, xid)
31 __field(u32, version)
32 __field(u32, proc)
33 ),
34
35 TP_fast_assign(
36 __entry->rep = rep;
37 __entry->r_xprt = rep->rr_rxprt;
38 __entry->xid = be32_to_cpu(rep->rr_xid);
39 __entry->version = be32_to_cpu(rep->rr_vers);
40 __entry->proc = be32_to_cpu(rep->rr_proc);
41 ),
42
43 TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
44 __entry->r_xprt, __entry->xid, __entry->rep,
45 __entry->version, __entry->proc
46 )
47);
48
49#define DEFINE_REPLY_EVENT(name) \
50 DEFINE_EVENT(xprtrdma_reply_event, name, \
51 TP_PROTO( \
52 const struct rpcrdma_rep *rep \
53 ), \
54 TP_ARGS(rep))
55
/* Transport-lifetime events: record the transport pointer plus the
 * peer's presentation address and port for display.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
86
/* Read-chunk registration events: one MR segment of a Read list,
 * including its position in the XDR stream. "more"/"last" indicates
 * whether the MR mapped fewer segments than were requested.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, name,			\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
138
/* Write/Reply-chunk registration events: same shape as the read-chunk
 * class but without an XDR position (Write/Reply chunks have none).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, name,			\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
186
/* Export the FRWR state values so user space can decode the
 * __print_symbolic() table below.
 */
TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);

#define xprtrdma_show_frwr_state(x)					\
		__print_symbolic(x,					\
				{ FRWR_IS_INVALID, "INVALID" },		\
				{ FRWR_IS_VALID, "VALID" },		\
				{ FRWR_FLUSHED_FR, "FLUSHED_FR" },	\
				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })

/* FRWR completion events: the containing MR, its registration state,
 * and the work completion status/vendor error.
 */
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, state)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		/* recover the MR that embeds this frwr */
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->state = frwr->fr_state;
		__entry->status = wc->status;
		/* vendor_err is meaningful only on failure */
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p state=%s: %s (%u/0x%x)",
		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))
236
/* MR events: one memory region's RDMA coordinates
 * (handle/length/offset).
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr, name,				\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

/* Backchannel events: tie an rpc_rqst to its rpcrdma_req/rep pair. */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))
304
305
306
307
308
309TRACE_EVENT(xprtrdma_conn_upcall,
310 TP_PROTO(
311 const struct rpcrdma_xprt *r_xprt,
312 struct rdma_cm_event *event
313 ),
314
315 TP_ARGS(r_xprt, event),
316
317 TP_STRUCT__entry(
318 __field(const void *, r_xprt)
319 __field(unsigned int, event)
320 __field(int, status)
321 __string(addr, rpcrdma_addrstr(r_xprt))
322 __string(port, rpcrdma_portstr(r_xprt))
323 ),
324
325 TP_fast_assign(
326 __entry->r_xprt = r_xprt;
327 __entry->event = event->event;
328 __entry->status = event->status;
329 __assign_str(addr, rpcrdma_addrstr(r_xprt));
330 __assign_str(port, rpcrdma_portstr(r_xprt));
331 ),
332
333 TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
334 __get_str(addr), __get_str(port),
335 __entry->r_xprt, rdma_show_cm_event(__entry->event),
336 __entry->event, __entry->status
337 )
338);
339
340TRACE_EVENT(xprtrdma_disconnect,
341 TP_PROTO(
342 const struct rpcrdma_xprt *r_xprt,
343 int status
344 ),
345
346 TP_ARGS(r_xprt, status),
347
348 TP_STRUCT__entry(
349 __field(const void *, r_xprt)
350 __field(int, status)
351 __field(int, connected)
352 __string(addr, rpcrdma_addrstr(r_xprt))
353 __string(port, rpcrdma_portstr(r_xprt))
354 ),
355
356 TP_fast_assign(
357 __entry->r_xprt = r_xprt;
358 __entry->status = status;
359 __entry->connected = r_xprt->rx_ep.rep_connected;
360 __assign_str(addr, rpcrdma_addrstr(r_xprt));
361 __assign_str(port, rpcrdma_portstr(r_xprt));
362 ),
363
364 TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
365 __get_str(addr), __get_str(port),
366 __entry->r_xprt, __entry->status,
367 __entry->connected == 1 ? "still " : "dis"
368 )
369);
370
371DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
372DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
373DEFINE_RXPRT_EVENT(xprtrdma_create);
374DEFINE_RXPRT_EVENT(xprtrdma_destroy);
375DEFINE_RXPRT_EVENT(xprtrdma_remove);
376DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
377DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
378DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
379
380TRACE_EVENT(xprtrdma_qp_error,
381 TP_PROTO(
382 const struct rpcrdma_xprt *r_xprt,
383 const struct ib_event *event
384 ),
385
386 TP_ARGS(r_xprt, event),
387
388 TP_STRUCT__entry(
389 __field(const void *, r_xprt)
390 __field(unsigned int, event)
391 __string(name, event->device->name)
392 __string(addr, rpcrdma_addrstr(r_xprt))
393 __string(port, rpcrdma_portstr(r_xprt))
394 ),
395
396 TP_fast_assign(
397 __entry->r_xprt = r_xprt;
398 __entry->event = event->event;
399 __assign_str(name, event->device->name);
400 __assign_str(addr, rpcrdma_addrstr(r_xprt));
401 __assign_str(port, rpcrdma_portstr(r_xprt));
402 ),
403
404 TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
405 __get_str(addr), __get_str(port), __entry->r_xprt,
406 __get_str(name), rdma_show_ib_event(__entry->event),
407 __entry->event
408 )
409);
410
411
412
413
414
415TRACE_EVENT(xprtrdma_createmrs,
416 TP_PROTO(
417 const struct rpcrdma_xprt *r_xprt,
418 unsigned int count
419 ),
420
421 TP_ARGS(r_xprt, count),
422
423 TP_STRUCT__entry(
424 __field(const void *, r_xprt)
425 __field(unsigned int, count)
426 ),
427
428 TP_fast_assign(
429 __entry->r_xprt = r_xprt;
430 __entry->count = count;
431 ),
432
433 TP_printk("r_xprt=%p: created %u MRs",
434 __entry->r_xprt, __entry->count
435 )
436);
437
438DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
439
440DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
441DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
442DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
443
444TRACE_DEFINE_ENUM(rpcrdma_noch);
445TRACE_DEFINE_ENUM(rpcrdma_readch);
446TRACE_DEFINE_ENUM(rpcrdma_areadch);
447TRACE_DEFINE_ENUM(rpcrdma_writech);
448TRACE_DEFINE_ENUM(rpcrdma_replych);
449
450#define xprtrdma_show_chunktype(x) \
451 __print_symbolic(x, \
452 { rpcrdma_noch, "inline" }, \
453 { rpcrdma_readch, "read list" }, \
454 { rpcrdma_areadch, "*read list" }, \
455 { rpcrdma_writech, "write list" }, \
456 { rpcrdma_replych, "reply chunk" })
457
/* Marshaling decisions for an outgoing call: transport header length,
 * the send buffer's head/page/tail split, and the chosen read and
 * write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned int hdrlen,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(rqst, hdrlen, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = hdrlen;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
500
/* Posting a Send WR: SGE count, whether completion was requested
 * (IB_SEND_SIGNALED), and the ib_post_send() return code.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, num_sge)
		__field(bool, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("req=%p, %d SGEs%s, status=%d",
		__entry->req, __entry->num_sge,
		(__entry->signaled ? ", signaled" : ""),
		__entry->status
	)
);

/* Posting a single Receive WR, identified by its CQE. */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),

	TP_ARGS(cqe),

	TP_STRUCT__entry(
		__field(const void *, cqe)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
	),

	TP_printk("cqe=%p",
		__entry->cqe
	)
);

/* Batch Receive posting: new WRs posted, total currently active,
 * and the posting return code.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_buf.rb_posted_receives;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
583
584
585
586
587
588TRACE_EVENT(xprtrdma_wc_send,
589 TP_PROTO(
590 const struct rpcrdma_sendctx *sc,
591 const struct ib_wc *wc
592 ),
593
594 TP_ARGS(sc, wc),
595
596 TP_STRUCT__entry(
597 __field(const void *, req)
598 __field(unsigned int, unmap_count)
599 __field(unsigned int, status)
600 __field(unsigned int, vendor_err)
601 ),
602
603 TP_fast_assign(
604 __entry->req = sc->sc_req;
605 __entry->unmap_count = sc->sc_unmap_count;
606 __entry->status = wc->status;
607 __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
608 ),
609
610 TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
611 __entry->req, __entry->unmap_count,
612 rdma_show_wc_status(__entry->status),
613 __entry->status, __entry->vendor_err
614 )
615);
616
617TRACE_EVENT(xprtrdma_wc_receive,
618 TP_PROTO(
619 const struct ib_wc *wc
620 ),
621
622 TP_ARGS(wc),
623
624 TP_STRUCT__entry(
625 __field(const void *, cqe)
626 __field(u32, byte_len)
627 __field(unsigned int, status)
628 __field(u32, vendor_err)
629 ),
630
631 TP_fast_assign(
632 __entry->cqe = wc->wr_cqe;
633 __entry->status = wc->status;
634 if (wc->status) {
635 __entry->byte_len = 0;
636 __entry->vendor_err = wc->vendor_err;
637 } else {
638 __entry->byte_len = wc->byte_len;
639 __entry->vendor_err = 0;
640 }
641 ),
642
643 TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
644 __entry->cqe, __entry->byte_len,
645 rdma_show_wc_status(__entry->status),
646 __entry->status, __entry->vendor_err
647 )
648);
649
650DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
651DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
652DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
653
654DEFINE_MR_EVENT(xprtrdma_localinv);
655DEFINE_MR_EVENT(xprtrdma_dma_map);
656DEFINE_MR_EVENT(xprtrdma_dma_unmap);
657DEFINE_MR_EVENT(xprtrdma_remoteinv);
658DEFINE_MR_EVENT(xprtrdma_recover_mr);
659
660
661
662
663
664TRACE_EVENT(xprtrdma_reply,
665 TP_PROTO(
666 const struct rpc_task *task,
667 const struct rpcrdma_rep *rep,
668 const struct rpcrdma_req *req,
669 unsigned int credits
670 ),
671
672 TP_ARGS(task, rep, req, credits),
673
674 TP_STRUCT__entry(
675 __field(unsigned int, task_id)
676 __field(unsigned int, client_id)
677 __field(const void *, rep)
678 __field(const void *, req)
679 __field(u32, xid)
680 __field(unsigned int, credits)
681 ),
682
683 TP_fast_assign(
684 __entry->task_id = task->tk_pid;
685 __entry->client_id = task->tk_client->cl_clid;
686 __entry->rep = rep;
687 __entry->req = req;
688 __entry->xid = be32_to_cpu(rep->rr_xid);
689 __entry->credits = credits;
690 ),
691
692 TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
693 __entry->task_id, __entry->client_id, __entry->xid,
694 __entry->credits, __entry->rep, __entry->req
695 )
696);
697
698TRACE_EVENT(xprtrdma_defer_cmp,
699 TP_PROTO(
700 const struct rpcrdma_rep *rep
701 ),
702
703 TP_ARGS(rep),
704
705 TP_STRUCT__entry(
706 __field(unsigned int, task_id)
707 __field(unsigned int, client_id)
708 __field(const void *, rep)
709 __field(u32, xid)
710 ),
711
712 TP_fast_assign(
713 __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
714 __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
715 __entry->rep = rep;
716 __entry->xid = be32_to_cpu(rep->rr_xid);
717 ),
718
719 TP_printk("task:%u@%u xid=0x%08x rep=%p",
720 __entry->task_id, __entry->client_id, __entry->xid,
721 __entry->rep
722 )
723);
724
725DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
726DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
727DEFINE_REPLY_EVENT(xprtrdma_reply_short);
728DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
729
/* Copying reply data into the receive buffer: overall fixup of the
 * head iovec.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);

/* Per-page progress of the reply-data fixup copy. */
TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);
795
/* One decoded RDMA segment: handle/length/offset triple from an
 * incoming transport header.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
822
823
824
825
826
827TRACE_EVENT(xprtrdma_allocate,
828 TP_PROTO(
829 const struct rpc_task *task,
830 const struct rpcrdma_req *req
831 ),
832
833 TP_ARGS(task, req),
834
835 TP_STRUCT__entry(
836 __field(unsigned int, task_id)
837 __field(unsigned int, client_id)
838 __field(const void *, req)
839 __field(size_t, callsize)
840 __field(size_t, rcvsize)
841 ),
842
843 TP_fast_assign(
844 __entry->task_id = task->tk_pid;
845 __entry->client_id = task->tk_client->cl_clid;
846 __entry->req = req;
847 __entry->callsize = task->tk_rqstp->rq_callsize;
848 __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
849 ),
850
851 TP_printk("task:%u@%u req=%p (%zu, %zu)",
852 __entry->task_id, __entry->client_id,
853 __entry->req, __entry->callsize, __entry->rcvsize
854 )
855);
856
857TRACE_EVENT(xprtrdma_rpc_done,
858 TP_PROTO(
859 const struct rpc_task *task,
860 const struct rpcrdma_req *req
861 ),
862
863 TP_ARGS(task, req),
864
865 TP_STRUCT__entry(
866 __field(unsigned int, task_id)
867 __field(unsigned int, client_id)
868 __field(const void *, req)
869 __field(const void *, rep)
870 ),
871
872 TP_fast_assign(
873 __entry->task_id = task->tk_pid;
874 __entry->client_id = task->tk_client->cl_clid;
875 __entry->req = req;
876 __entry->rep = req->rl_reply;
877 ),
878
879 TP_printk("task:%u@%u req=%p rep=%p",
880 __entry->task_id, __entry->client_id,
881 __entry->req, __entry->rep
882 )
883);
884
885
886
887
888
889TRACE_EVENT(xprtrdma_cb_setup,
890 TP_PROTO(
891 const struct rpcrdma_xprt *r_xprt,
892 unsigned int reqs
893 ),
894
895 TP_ARGS(r_xprt, reqs),
896
897 TP_STRUCT__entry(
898 __field(const void *, r_xprt)
899 __field(unsigned int, reqs)
900 __string(addr, rpcrdma_addrstr(r_xprt))
901 __string(port, rpcrdma_portstr(r_xprt))
902 ),
903
904 TP_fast_assign(
905 __entry->r_xprt = r_xprt;
906 __entry->reqs = reqs;
907 __assign_str(addr, rpcrdma_addrstr(r_xprt));
908 __assign_str(port, rpcrdma_portstr(r_xprt));
909 ),
910
911 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
912 __get_str(addr), __get_str(port),
913 __entry->r_xprt, __entry->reqs
914 )
915);
916
917DEFINE_CB_EVENT(xprtrdma_cb_call);
918DEFINE_CB_EVENT(xprtrdma_cb_reply);
919
920
921
922
923
924DECLARE_EVENT_CLASS(svcrdma_xprt_event,
925 TP_PROTO(
926 const struct svc_xprt *xprt
927 ),
928
929 TP_ARGS(xprt),
930
931 TP_STRUCT__entry(
932 __field(const void *, xprt)
933 __string(addr, xprt->xpt_remotebuf)
934 ),
935
936 TP_fast_assign(
937 __entry->xprt = xprt;
938 __assign_str(addr, xprt->xpt_remotebuf);
939 ),
940
941 TP_printk("xprt=%p addr=%s",
942 __entry->xprt, __get_str(addr)
943 )
944);
945
946#define DEFINE_XPRT_EVENT(name) \
947 DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
948 TP_PROTO( \
949 const struct svc_xprt *xprt \
950 ), \
951 TP_ARGS(xprt))
952
953DEFINE_XPRT_EVENT(accept);
954DEFINE_XPRT_EVENT(fail);
955DEFINE_XPRT_EVENT(free);
956
957TRACE_DEFINE_ENUM(RDMA_MSG);
958TRACE_DEFINE_ENUM(RDMA_NOMSG);
959TRACE_DEFINE_ENUM(RDMA_MSGP);
960TRACE_DEFINE_ENUM(RDMA_DONE);
961TRACE_DEFINE_ENUM(RDMA_ERROR);
962
963#define show_rpcrdma_proc(x) \
964 __print_symbolic(x, \
965 { RDMA_MSG, "RDMA_MSG" }, \
966 { RDMA_NOMSG, "RDMA_NOMSG" }, \
967 { RDMA_MSGP, "RDMA_MSGP" }, \
968 { RDMA_DONE, "RDMA_DONE" }, \
969 { RDMA_ERROR, "RDMA_ERROR" })
970
/* Successfully decoded transport header: the four leading words
 * (XID, version, credits, procedure) and total header length.
 * Note the wire order is xid, vers, credits, proc.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

/* Received message too short to contain a transport header. */
TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);

/* Header decode failures: record the four leading header words so
 * the bad request can be identified.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
				TP_PROTO(				\
					__be32 *p			\
				),					\
				TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1054
/* One encoded RDMA segment (handle/length/offset) on the server. */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);

/* Total length of an encoded chunk. */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);

/* An encoded Read chunk, including its XDR position. */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1147
/* Server-generated RPC/RDMA error replies, keyed by the XID of the
 * offending request.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/
1182TRACE_EVENT(svcrdma_dma_map_page,
1183 TP_PROTO(
1184 const struct svcxprt_rdma *rdma,
1185 const void *page
1186 ),
1187
1188 TP_ARGS(rdma, page),
1189
1190 TP_STRUCT__entry(
1191 __field(const void *, page);
1192 __string(device, rdma->sc_cm_id->device->name)
1193 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1194 ),
1195
1196 TP_fast_assign(
1197 __entry->page = page;
1198 __assign_str(device, rdma->sc_cm_id->device->name);
1199 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1200 ),
1201
1202 TP_printk("addr=%s device=%s page=%p",
1203 __get_str(addr), __get_str(device), __entry->page
1204 )
1205);
1206
/* DMA-mapping failure for an R/W context: the mapping status plus
 * device and client identity.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);
1231
1232TRACE_EVENT(svcrdma_send_failed,
1233 TP_PROTO(
1234 const struct svc_rqst *rqst,
1235 int status
1236 ),
1237
1238 TP_ARGS(rqst, status),
1239
1240 TP_STRUCT__entry(
1241 __field(int, status)
1242 __field(u32, xid)
1243 __field(const void *, xprt)
1244 __string(addr, rqst->rq_xprt->xpt_remotebuf)
1245 ),
1246
1247 TP_fast_assign(
1248 __entry->status = status;
1249 __entry->xid = __be32_to_cpu(rqst->rq_xid);
1250 __entry->xprt = rqst->rq_xprt;
1251 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1252 ),
1253
1254 TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1255 __entry->xprt, __get_str(addr),
1256 __entry->xid, __entry->status
1257 )
1258);
1259
/* Server-side send-direction completions (Send/Read/Write WRs). */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		/* vendor_err is meaningful only on failure */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))

/* Posting a Send WR on the server: SGE count, the rkey to invalidate
 * (for Send-with-Invalidate), and the posting return code.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(send);
1325
/* Posting a Receive WR on the server. */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);

/* Server Receive completion: byte_len valid only on success, vendor
 * error recorded only on failure.
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1381
/* Posting an RDMA Read/Write chain: how many SQ entries it consumed
 * and the posting return code.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);

/* RDMA CM event on a server listener/transport; the peer address is
 * formatted into a fixed-size buffer at trace time.
 */
TRACE_EVENT(svcrdma_cm_event,
	TP_PROTO(
		const struct rdma_cm_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__field(int, status)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__entry->status = event->status;
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s event=%s (%u/%d)",
		__entry->addr,
		rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);
1438
/* Asynchronous QP error on a server transport. */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

/* Send queue pressure: available vs. total SQ depth at trace time. */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma	\
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1499
1500#endif
1501
1502#include <trace/define_trace.h>
1503