• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
21 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
22 	TP_PROTO(
23 		const struct rpcrdma_rep *rep
24 	),
25 
26 	TP_ARGS(rep),
27 
28 	TP_STRUCT__entry(
29 		__field(const void *, rep)
30 		__field(const void *, r_xprt)
31 		__field(u32, xid)
32 		__field(u32, version)
33 		__field(u32, proc)
34 	),
35 
36 	TP_fast_assign(
37 		__entry->rep = rep;
38 		__entry->r_xprt = rep->rr_rxprt;
39 		__entry->xid = be32_to_cpu(rep->rr_xid);
40 		__entry->version = be32_to_cpu(rep->rr_vers);
41 		__entry->proc = be32_to_cpu(rep->rr_proc);
42 	),
43 
44 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
45 		__entry->r_xprt, __entry->xid, __entry->rep,
46 		__entry->version, __entry->proc
47 	)
48 );
49 
50 #define DEFINE_REPLY_EVENT(name)					\
51 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
52 				TP_PROTO(				\
53 					const struct rpcrdma_rep *rep	\
54 				),					\
55 				TP_ARGS(rep))
56 
57 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
58 	TP_PROTO(
59 		const struct rpcrdma_xprt *r_xprt
60 	),
61 
62 	TP_ARGS(r_xprt),
63 
64 	TP_STRUCT__entry(
65 		__field(const void *, r_xprt)
66 		__string(addr, rpcrdma_addrstr(r_xprt))
67 		__string(port, rpcrdma_portstr(r_xprt))
68 	),
69 
70 	TP_fast_assign(
71 		__entry->r_xprt = r_xprt;
72 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
73 		__assign_str(port, rpcrdma_portstr(r_xprt));
74 	),
75 
76 	TP_printk("peer=[%s]:%s r_xprt=%p",
77 		__get_str(addr), __get_str(port), __entry->r_xprt
78 	)
79 );
80 
81 #define DEFINE_RXPRT_EVENT(name)					\
82 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
83 				TP_PROTO(				\
84 					const struct rpcrdma_xprt *r_xprt \
85 				),					\
86 				TP_ARGS(r_xprt))
87 
88 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
89 	TP_PROTO(
90 		const struct rpc_task *task,
91 		unsigned int pos,
92 		struct rpcrdma_mr *mr,
93 		int nsegs
94 	),
95 
96 	TP_ARGS(task, pos, mr, nsegs),
97 
98 	TP_STRUCT__entry(
99 		__field(unsigned int, task_id)
100 		__field(unsigned int, client_id)
101 		__field(unsigned int, pos)
102 		__field(int, nents)
103 		__field(u32, handle)
104 		__field(u32, length)
105 		__field(u64, offset)
106 		__field(int, nsegs)
107 	),
108 
109 	TP_fast_assign(
110 		__entry->task_id = task->tk_pid;
111 		__entry->client_id = task->tk_client->cl_clid;
112 		__entry->pos = pos;
113 		__entry->nents = mr->mr_nents;
114 		__entry->handle = mr->mr_handle;
115 		__entry->length = mr->mr_length;
116 		__entry->offset = mr->mr_offset;
117 		__entry->nsegs = nsegs;
118 	),
119 
120 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
121 		__entry->task_id, __entry->client_id,
122 		__entry->pos, __entry->length,
123 		(unsigned long long)__entry->offset, __entry->handle,
124 		__entry->nents < __entry->nsegs ? "more" : "last"
125 	)
126 );
127 
128 #define DEFINE_RDCH_EVENT(name)						\
129 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
130 				TP_PROTO(				\
131 					const struct rpc_task *task,	\
132 					unsigned int pos,		\
133 					struct rpcrdma_mr *mr,		\
134 					int nsegs			\
135 				),					\
136 				TP_ARGS(task, pos, mr, nsegs))
137 
138 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
139 	TP_PROTO(
140 		const struct rpc_task *task,
141 		struct rpcrdma_mr *mr,
142 		int nsegs
143 	),
144 
145 	TP_ARGS(task, mr, nsegs),
146 
147 	TP_STRUCT__entry(
148 		__field(unsigned int, task_id)
149 		__field(unsigned int, client_id)
150 		__field(int, nents)
151 		__field(u32, handle)
152 		__field(u32, length)
153 		__field(u64, offset)
154 		__field(int, nsegs)
155 	),
156 
157 	TP_fast_assign(
158 		__entry->task_id = task->tk_pid;
159 		__entry->client_id = task->tk_client->cl_clid;
160 		__entry->nents = mr->mr_nents;
161 		__entry->handle = mr->mr_handle;
162 		__entry->length = mr->mr_length;
163 		__entry->offset = mr->mr_offset;
164 		__entry->nsegs = nsegs;
165 	),
166 
167 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
168 		__entry->task_id, __entry->client_id,
169 		__entry->length, (unsigned long long)__entry->offset,
170 		__entry->handle,
171 		__entry->nents < __entry->nsegs ? "more" : "last"
172 	)
173 );
174 
175 #define DEFINE_WRCH_EVENT(name)						\
176 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
177 				TP_PROTO(				\
178 					const struct rpc_task *task,	\
179 					struct rpcrdma_mr *mr,		\
180 					int nsegs			\
181 				),					\
182 				TP_ARGS(task, mr, nsegs))
183 
184 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
185 	TP_PROTO(
186 		const struct ib_wc *wc,
187 		const struct rpcrdma_frwr *frwr
188 	),
189 
190 	TP_ARGS(wc, frwr),
191 
192 	TP_STRUCT__entry(
193 		__field(const void *, mr)
194 		__field(unsigned int, status)
195 		__field(unsigned int, vendor_err)
196 	),
197 
198 	TP_fast_assign(
199 		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
200 		__entry->status = wc->status;
201 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
202 	),
203 
204 	TP_printk(
205 		"mr=%p: %s (%u/0x%x)",
206 		__entry->mr, rdma_show_wc_status(__entry->status),
207 		__entry->status, __entry->vendor_err
208 	)
209 );
210 
211 #define DEFINE_FRWR_DONE_EVENT(name)					\
212 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
213 				TP_PROTO(				\
214 					const struct ib_wc *wc,		\
215 					const struct rpcrdma_frwr *frwr	\
216 				),					\
217 				TP_ARGS(wc, frwr))
218 
219 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
220 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
221 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
222 TRACE_DEFINE_ENUM(DMA_NONE);
223 
224 #define xprtrdma_show_direction(x)					\
225 		__print_symbolic(x,					\
226 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
227 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
228 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
229 				{ DMA_NONE, "NONE" })
230 
231 DECLARE_EVENT_CLASS(xprtrdma_mr,
232 	TP_PROTO(
233 		const struct rpcrdma_mr *mr
234 	),
235 
236 	TP_ARGS(mr),
237 
238 	TP_STRUCT__entry(
239 		__field(const void *, mr)
240 		__field(u32, handle)
241 		__field(u32, length)
242 		__field(u64, offset)
243 		__field(u32, dir)
244 	),
245 
246 	TP_fast_assign(
247 		__entry->mr = mr;
248 		__entry->handle = mr->mr_handle;
249 		__entry->length = mr->mr_length;
250 		__entry->offset = mr->mr_offset;
251 		__entry->dir    = mr->mr_dir;
252 	),
253 
254 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
255 		__entry->mr, __entry->length,
256 		(unsigned long long)__entry->offset, __entry->handle,
257 		xprtrdma_show_direction(__entry->dir)
258 	)
259 );
260 
261 #define DEFINE_MR_EVENT(name) \
262 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
263 				TP_PROTO( \
264 					const struct rpcrdma_mr *mr \
265 				), \
266 				TP_ARGS(mr))
267 
268 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
269 	TP_PROTO(
270 		const struct rpc_rqst *rqst
271 	),
272 
273 	TP_ARGS(rqst),
274 
275 	TP_STRUCT__entry(
276 		__field(const void *, rqst)
277 		__field(const void *, rep)
278 		__field(const void *, req)
279 		__field(u32, xid)
280 	),
281 
282 	TP_fast_assign(
283 		__entry->rqst = rqst;
284 		__entry->req = rpcr_to_rdmar(rqst);
285 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
286 		__entry->xid = be32_to_cpu(rqst->rq_xid);
287 	),
288 
289 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
290 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
291 	)
292 );
293 
294 #define DEFINE_CB_EVENT(name)						\
295 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
296 				TP_PROTO(				\
297 					const struct rpc_rqst *rqst	\
298 				),					\
299 				TP_ARGS(rqst))
300 
301 /**
302  ** Connection events
303  **/
304 
305 TRACE_EVENT(xprtrdma_cm_event,
306 	TP_PROTO(
307 		const struct rpcrdma_xprt *r_xprt,
308 		struct rdma_cm_event *event
309 	),
310 
311 	TP_ARGS(r_xprt, event),
312 
313 	TP_STRUCT__entry(
314 		__field(const void *, r_xprt)
315 		__field(unsigned int, event)
316 		__field(int, status)
317 		__string(addr, rpcrdma_addrstr(r_xprt))
318 		__string(port, rpcrdma_portstr(r_xprt))
319 	),
320 
321 	TP_fast_assign(
322 		__entry->r_xprt = r_xprt;
323 		__entry->event = event->event;
324 		__entry->status = event->status;
325 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
326 		__assign_str(port, rpcrdma_portstr(r_xprt));
327 	),
328 
329 	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
330 		__get_str(addr), __get_str(port),
331 		__entry->r_xprt, rdma_show_cm_event(__entry->event),
332 		__entry->event, __entry->status
333 	)
334 );
335 
336 TRACE_EVENT(xprtrdma_disconnect,
337 	TP_PROTO(
338 		const struct rpcrdma_xprt *r_xprt,
339 		int status
340 	),
341 
342 	TP_ARGS(r_xprt, status),
343 
344 	TP_STRUCT__entry(
345 		__field(const void *, r_xprt)
346 		__field(int, status)
347 		__field(int, connected)
348 		__string(addr, rpcrdma_addrstr(r_xprt))
349 		__string(port, rpcrdma_portstr(r_xprt))
350 	),
351 
352 	TP_fast_assign(
353 		__entry->r_xprt = r_xprt;
354 		__entry->status = status;
355 		__entry->connected = r_xprt->rx_ep.rep_connected;
356 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
357 		__assign_str(port, rpcrdma_portstr(r_xprt));
358 	),
359 
360 	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
361 		__get_str(addr), __get_str(port),
362 		__entry->r_xprt, __entry->status,
363 		__entry->connected == 1 ? "still " : "dis"
364 	)
365 );
366 
367 DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
368 DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
369 DEFINE_RXPRT_EVENT(xprtrdma_create);
370 DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
371 DEFINE_RXPRT_EVENT(xprtrdma_remove);
372 DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
373 DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
374 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
375 DEFINE_RXPRT_EVENT(xprtrdma_op_close);
376 DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
377 
378 TRACE_EVENT(xprtrdma_op_set_cto,
379 	TP_PROTO(
380 		const struct rpcrdma_xprt *r_xprt,
381 		unsigned long connect,
382 		unsigned long reconnect
383 	),
384 
385 	TP_ARGS(r_xprt, connect, reconnect),
386 
387 	TP_STRUCT__entry(
388 		__field(const void *, r_xprt)
389 		__field(unsigned long, connect)
390 		__field(unsigned long, reconnect)
391 		__string(addr, rpcrdma_addrstr(r_xprt))
392 		__string(port, rpcrdma_portstr(r_xprt))
393 	),
394 
395 	TP_fast_assign(
396 		__entry->r_xprt = r_xprt;
397 		__entry->connect = connect;
398 		__entry->reconnect = reconnect;
399 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
400 		__assign_str(port, rpcrdma_portstr(r_xprt));
401 	),
402 
403 	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
404 		__get_str(addr), __get_str(port), __entry->r_xprt,
405 		__entry->connect / HZ, __entry->reconnect / HZ
406 	)
407 );
408 
409 TRACE_EVENT(xprtrdma_qp_event,
410 	TP_PROTO(
411 		const struct rpcrdma_xprt *r_xprt,
412 		const struct ib_event *event
413 	),
414 
415 	TP_ARGS(r_xprt, event),
416 
417 	TP_STRUCT__entry(
418 		__field(const void *, r_xprt)
419 		__field(unsigned int, event)
420 		__string(name, event->device->name)
421 		__string(addr, rpcrdma_addrstr(r_xprt))
422 		__string(port, rpcrdma_portstr(r_xprt))
423 	),
424 
425 	TP_fast_assign(
426 		__entry->r_xprt = r_xprt;
427 		__entry->event = event->event;
428 		__assign_str(name, event->device->name);
429 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
430 		__assign_str(port, rpcrdma_portstr(r_xprt));
431 	),
432 
433 	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
434 		__get_str(addr), __get_str(port), __entry->r_xprt,
435 		__get_str(name), rdma_show_ib_event(__entry->event),
436 		__entry->event
437 	)
438 );
439 
440 /**
441  ** Call events
442  **/
443 
444 TRACE_EVENT(xprtrdma_createmrs,
445 	TP_PROTO(
446 		const struct rpcrdma_xprt *r_xprt,
447 		unsigned int count
448 	),
449 
450 	TP_ARGS(r_xprt, count),
451 
452 	TP_STRUCT__entry(
453 		__field(const void *, r_xprt)
454 		__string(addr, rpcrdma_addrstr(r_xprt))
455 		__string(port, rpcrdma_portstr(r_xprt))
456 		__field(unsigned int, count)
457 	),
458 
459 	TP_fast_assign(
460 		__entry->r_xprt = r_xprt;
461 		__entry->count = count;
462 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
463 		__assign_str(port, rpcrdma_portstr(r_xprt));
464 	),
465 
466 	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
467 		__get_str(addr), __get_str(port), __entry->r_xprt,
468 		__entry->count
469 	)
470 );
471 
472 TRACE_EVENT(xprtrdma_mr_get,
473 	TP_PROTO(
474 		const struct rpcrdma_req *req
475 	),
476 
477 	TP_ARGS(req),
478 
479 	TP_STRUCT__entry(
480 		__field(const void *, req)
481 		__field(unsigned int, task_id)
482 		__field(unsigned int, client_id)
483 		__field(u32, xid)
484 	),
485 
486 	TP_fast_assign(
487 		const struct rpc_rqst *rqst = &req->rl_slot;
488 
489 		__entry->req = req;
490 		__entry->task_id = rqst->rq_task->tk_pid;
491 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
492 		__entry->xid = be32_to_cpu(rqst->rq_xid);
493 	),
494 
495 	TP_printk("task:%u@%u xid=0x%08x req=%p",
496 		__entry->task_id, __entry->client_id, __entry->xid,
497 		__entry->req
498 	)
499 );
500 
501 TRACE_EVENT(xprtrdma_nomrs,
502 	TP_PROTO(
503 		const struct rpcrdma_req *req
504 	),
505 
506 	TP_ARGS(req),
507 
508 	TP_STRUCT__entry(
509 		__field(const void *, req)
510 		__field(unsigned int, task_id)
511 		__field(unsigned int, client_id)
512 		__field(u32, xid)
513 	),
514 
515 	TP_fast_assign(
516 		const struct rpc_rqst *rqst = &req->rl_slot;
517 
518 		__entry->req = req;
519 		__entry->task_id = rqst->rq_task->tk_pid;
520 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
521 		__entry->xid = be32_to_cpu(rqst->rq_xid);
522 	),
523 
524 	TP_printk("task:%u@%u xid=0x%08x req=%p",
525 		__entry->task_id, __entry->client_id, __entry->xid,
526 		__entry->req
527 	)
528 );
529 
530 DEFINE_RDCH_EVENT(read);
531 DEFINE_WRCH_EVENT(write);
532 DEFINE_WRCH_EVENT(reply);
533 
534 TRACE_DEFINE_ENUM(rpcrdma_noch);
535 TRACE_DEFINE_ENUM(rpcrdma_readch);
536 TRACE_DEFINE_ENUM(rpcrdma_areadch);
537 TRACE_DEFINE_ENUM(rpcrdma_writech);
538 TRACE_DEFINE_ENUM(rpcrdma_replych);
539 
540 #define xprtrdma_show_chunktype(x)					\
541 		__print_symbolic(x,					\
542 				{ rpcrdma_noch, "inline" },		\
543 				{ rpcrdma_readch, "read list" },	\
544 				{ rpcrdma_areadch, "*read list" },	\
545 				{ rpcrdma_writech, "write list" },	\
546 				{ rpcrdma_replych, "reply chunk" })
547 
548 TRACE_EVENT(xprtrdma_marshal,
549 	TP_PROTO(
550 		const struct rpcrdma_req *req,
551 		unsigned int rtype,
552 		unsigned int wtype
553 	),
554 
555 	TP_ARGS(req, rtype, wtype),
556 
557 	TP_STRUCT__entry(
558 		__field(unsigned int, task_id)
559 		__field(unsigned int, client_id)
560 		__field(u32, xid)
561 		__field(unsigned int, hdrlen)
562 		__field(unsigned int, headlen)
563 		__field(unsigned int, pagelen)
564 		__field(unsigned int, taillen)
565 		__field(unsigned int, rtype)
566 		__field(unsigned int, wtype)
567 	),
568 
569 	TP_fast_assign(
570 		const struct rpc_rqst *rqst = &req->rl_slot;
571 
572 		__entry->task_id = rqst->rq_task->tk_pid;
573 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
574 		__entry->xid = be32_to_cpu(rqst->rq_xid);
575 		__entry->hdrlen = req->rl_hdrbuf.len;
576 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
577 		__entry->pagelen = rqst->rq_snd_buf.page_len;
578 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
579 		__entry->rtype = rtype;
580 		__entry->wtype = wtype;
581 	),
582 
583 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
584 		__entry->task_id, __entry->client_id, __entry->xid,
585 		__entry->hdrlen,
586 		__entry->headlen, __entry->pagelen, __entry->taillen,
587 		xprtrdma_show_chunktype(__entry->rtype),
588 		xprtrdma_show_chunktype(__entry->wtype)
589 	)
590 );
591 
592 TRACE_EVENT(xprtrdma_marshal_failed,
593 	TP_PROTO(const struct rpc_rqst *rqst,
594 		 int ret
595 	),
596 
597 	TP_ARGS(rqst, ret),
598 
599 	TP_STRUCT__entry(
600 		__field(unsigned int, task_id)
601 		__field(unsigned int, client_id)
602 		__field(u32, xid)
603 		__field(int, ret)
604 	),
605 
606 	TP_fast_assign(
607 		__entry->task_id = rqst->rq_task->tk_pid;
608 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
609 		__entry->xid = be32_to_cpu(rqst->rq_xid);
610 		__entry->ret = ret;
611 	),
612 
613 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
614 		__entry->task_id, __entry->client_id, __entry->xid,
615 		__entry->ret
616 	)
617 );
618 
619 TRACE_EVENT(xprtrdma_prepsend_failed,
620 	TP_PROTO(const struct rpc_rqst *rqst,
621 		 int ret
622 	),
623 
624 	TP_ARGS(rqst, ret),
625 
626 	TP_STRUCT__entry(
627 		__field(unsigned int, task_id)
628 		__field(unsigned int, client_id)
629 		__field(u32, xid)
630 		__field(int, ret)
631 	),
632 
633 	TP_fast_assign(
634 		__entry->task_id = rqst->rq_task->tk_pid;
635 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
636 		__entry->xid = be32_to_cpu(rqst->rq_xid);
637 		__entry->ret = ret;
638 	),
639 
640 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
641 		__entry->task_id, __entry->client_id, __entry->xid,
642 		__entry->ret
643 	)
644 );
645 
646 TRACE_EVENT(xprtrdma_post_send,
647 	TP_PROTO(
648 		const struct rpcrdma_req *req,
649 		int status
650 	),
651 
652 	TP_ARGS(req, status),
653 
654 	TP_STRUCT__entry(
655 		__field(const void *, req)
656 		__field(unsigned int, task_id)
657 		__field(unsigned int, client_id)
658 		__field(int, num_sge)
659 		__field(int, signaled)
660 		__field(int, status)
661 	),
662 
663 	TP_fast_assign(
664 		const struct rpc_rqst *rqst = &req->rl_slot;
665 
666 		__entry->task_id = rqst->rq_task->tk_pid;
667 		__entry->client_id = rqst->rq_task->tk_client ?
668 				     rqst->rq_task->tk_client->cl_clid : -1;
669 		__entry->req = req;
670 		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
671 		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
672 				    IB_SEND_SIGNALED;
673 		__entry->status = status;
674 	),
675 
676 	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
677 		__entry->task_id, __entry->client_id,
678 		__entry->req, __entry->num_sge,
679 		(__entry->num_sge == 1 ? "" : "s"),
680 		(__entry->signaled ? "signaled " : ""),
681 		__entry->status
682 	)
683 );
684 
685 TRACE_EVENT(xprtrdma_post_recv,
686 	TP_PROTO(
687 		const struct rpcrdma_rep *rep
688 	),
689 
690 	TP_ARGS(rep),
691 
692 	TP_STRUCT__entry(
693 		__field(const void *, rep)
694 	),
695 
696 	TP_fast_assign(
697 		__entry->rep = rep;
698 	),
699 
700 	TP_printk("rep=%p",
701 		__entry->rep
702 	)
703 );
704 
705 TRACE_EVENT(xprtrdma_post_recvs,
706 	TP_PROTO(
707 		const struct rpcrdma_xprt *r_xprt,
708 		unsigned int count,
709 		int status
710 	),
711 
712 	TP_ARGS(r_xprt, count, status),
713 
714 	TP_STRUCT__entry(
715 		__field(const void *, r_xprt)
716 		__field(unsigned int, count)
717 		__field(int, status)
718 		__field(int, posted)
719 		__string(addr, rpcrdma_addrstr(r_xprt))
720 		__string(port, rpcrdma_portstr(r_xprt))
721 	),
722 
723 	TP_fast_assign(
724 		__entry->r_xprt = r_xprt;
725 		__entry->count = count;
726 		__entry->status = status;
727 		__entry->posted = r_xprt->rx_ep.rep_receive_count;
728 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
729 		__assign_str(port, rpcrdma_portstr(r_xprt));
730 	),
731 
732 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
733 		__get_str(addr), __get_str(port), __entry->r_xprt,
734 		__entry->count, __entry->posted, __entry->status
735 	)
736 );
737 
738 TRACE_EVENT(xprtrdma_post_linv,
739 	TP_PROTO(
740 		const struct rpcrdma_req *req,
741 		int status
742 	),
743 
744 	TP_ARGS(req, status),
745 
746 	TP_STRUCT__entry(
747 		__field(const void *, req)
748 		__field(int, status)
749 		__field(u32, xid)
750 	),
751 
752 	TP_fast_assign(
753 		__entry->req = req;
754 		__entry->status = status;
755 		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
756 	),
757 
758 	TP_printk("req=%p xid=0x%08x status=%d",
759 		__entry->req, __entry->xid, __entry->status
760 	)
761 );
762 
763 /**
764  ** Completion events
765  **/
766 
767 TRACE_EVENT(xprtrdma_wc_send,
768 	TP_PROTO(
769 		const struct rpcrdma_sendctx *sc,
770 		const struct ib_wc *wc
771 	),
772 
773 	TP_ARGS(sc, wc),
774 
775 	TP_STRUCT__entry(
776 		__field(const void *, req)
777 		__field(unsigned int, unmap_count)
778 		__field(unsigned int, status)
779 		__field(unsigned int, vendor_err)
780 	),
781 
782 	TP_fast_assign(
783 		__entry->req = sc->sc_req;
784 		__entry->unmap_count = sc->sc_unmap_count;
785 		__entry->status = wc->status;
786 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
787 	),
788 
789 	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
790 		__entry->req, __entry->unmap_count,
791 		rdma_show_wc_status(__entry->status),
792 		__entry->status, __entry->vendor_err
793 	)
794 );
795 
796 TRACE_EVENT(xprtrdma_wc_receive,
797 	TP_PROTO(
798 		const struct ib_wc *wc
799 	),
800 
801 	TP_ARGS(wc),
802 
803 	TP_STRUCT__entry(
804 		__field(const void *, rep)
805 		__field(u32, byte_len)
806 		__field(unsigned int, status)
807 		__field(u32, vendor_err)
808 	),
809 
810 	TP_fast_assign(
811 		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
812 					    rr_cqe);
813 		__entry->status = wc->status;
814 		if (wc->status) {
815 			__entry->byte_len = 0;
816 			__entry->vendor_err = wc->vendor_err;
817 		} else {
818 			__entry->byte_len = wc->byte_len;
819 			__entry->vendor_err = 0;
820 		}
821 	),
822 
823 	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
824 		__entry->rep, __entry->byte_len,
825 		rdma_show_wc_status(__entry->status),
826 		__entry->status, __entry->vendor_err
827 	)
828 );
829 
830 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
831 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
832 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
833 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
834 
835 TRACE_EVENT(xprtrdma_frwr_alloc,
836 	TP_PROTO(
837 		const struct rpcrdma_mr *mr,
838 		int rc
839 	),
840 
841 	TP_ARGS(mr, rc),
842 
843 	TP_STRUCT__entry(
844 		__field(const void *, mr)
845 		__field(int, rc)
846 	),
847 
848 	TP_fast_assign(
849 		__entry->mr = mr;
850 		__entry->rc	= rc;
851 	),
852 
853 	TP_printk("mr=%p: rc=%d",
854 		__entry->mr, __entry->rc
855 	)
856 );
857 
858 TRACE_EVENT(xprtrdma_frwr_dereg,
859 	TP_PROTO(
860 		const struct rpcrdma_mr *mr,
861 		int rc
862 	),
863 
864 	TP_ARGS(mr, rc),
865 
866 	TP_STRUCT__entry(
867 		__field(const void *, mr)
868 		__field(u32, handle)
869 		__field(u32, length)
870 		__field(u64, offset)
871 		__field(u32, dir)
872 		__field(int, rc)
873 	),
874 
875 	TP_fast_assign(
876 		__entry->mr = mr;
877 		__entry->handle = mr->mr_handle;
878 		__entry->length = mr->mr_length;
879 		__entry->offset = mr->mr_offset;
880 		__entry->dir    = mr->mr_dir;
881 		__entry->rc	= rc;
882 	),
883 
884 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
885 		__entry->mr, __entry->length,
886 		(unsigned long long)__entry->offset, __entry->handle,
887 		xprtrdma_show_direction(__entry->dir),
888 		__entry->rc
889 	)
890 );
891 
892 TRACE_EVENT(xprtrdma_frwr_sgerr,
893 	TP_PROTO(
894 		const struct rpcrdma_mr *mr,
895 		int sg_nents
896 	),
897 
898 	TP_ARGS(mr, sg_nents),
899 
900 	TP_STRUCT__entry(
901 		__field(const void *, mr)
902 		__field(u64, addr)
903 		__field(u32, dir)
904 		__field(int, nents)
905 	),
906 
907 	TP_fast_assign(
908 		__entry->mr = mr;
909 		__entry->addr = mr->mr_sg->dma_address;
910 		__entry->dir = mr->mr_dir;
911 		__entry->nents = sg_nents;
912 	),
913 
914 	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
915 		__entry->mr, __entry->addr,
916 		xprtrdma_show_direction(__entry->dir),
917 		__entry->nents
918 	)
919 );
920 
921 TRACE_EVENT(xprtrdma_frwr_maperr,
922 	TP_PROTO(
923 		const struct rpcrdma_mr *mr,
924 		int num_mapped
925 	),
926 
927 	TP_ARGS(mr, num_mapped),
928 
929 	TP_STRUCT__entry(
930 		__field(const void *, mr)
931 		__field(u64, addr)
932 		__field(u32, dir)
933 		__field(int, num_mapped)
934 		__field(int, nents)
935 	),
936 
937 	TP_fast_assign(
938 		__entry->mr = mr;
939 		__entry->addr = mr->mr_sg->dma_address;
940 		__entry->dir = mr->mr_dir;
941 		__entry->num_mapped = num_mapped;
942 		__entry->nents = mr->mr_nents;
943 	),
944 
945 	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
946 		__entry->mr, __entry->addr,
947 		xprtrdma_show_direction(__entry->dir),
948 		__entry->num_mapped, __entry->nents
949 	)
950 );
951 
952 DEFINE_MR_EVENT(localinv);
953 DEFINE_MR_EVENT(map);
954 DEFINE_MR_EVENT(unmap);
955 DEFINE_MR_EVENT(remoteinv);
956 DEFINE_MR_EVENT(recycle);
957 
958 TRACE_EVENT(xprtrdma_dma_maperr,
959 	TP_PROTO(
960 		u64 addr
961 	),
962 
963 	TP_ARGS(addr),
964 
965 	TP_STRUCT__entry(
966 		__field(u64, addr)
967 	),
968 
969 	TP_fast_assign(
970 		__entry->addr = addr;
971 	),
972 
973 	TP_printk("dma addr=0x%llx\n", __entry->addr)
974 );
975 
976 /**
977  ** Reply events
978  **/
979 
980 TRACE_EVENT(xprtrdma_reply,
981 	TP_PROTO(
982 		const struct rpc_task *task,
983 		const struct rpcrdma_rep *rep,
984 		const struct rpcrdma_req *req,
985 		unsigned int credits
986 	),
987 
988 	TP_ARGS(task, rep, req, credits),
989 
990 	TP_STRUCT__entry(
991 		__field(unsigned int, task_id)
992 		__field(unsigned int, client_id)
993 		__field(const void *, rep)
994 		__field(const void *, req)
995 		__field(u32, xid)
996 		__field(unsigned int, credits)
997 	),
998 
999 	TP_fast_assign(
1000 		__entry->task_id = task->tk_pid;
1001 		__entry->client_id = task->tk_client->cl_clid;
1002 		__entry->rep = rep;
1003 		__entry->req = req;
1004 		__entry->xid = be32_to_cpu(rep->rr_xid);
1005 		__entry->credits = credits;
1006 	),
1007 
1008 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
1009 		__entry->task_id, __entry->client_id, __entry->xid,
1010 		__entry->credits, __entry->rep, __entry->req
1011 	)
1012 );
1013 
1014 TRACE_EVENT(xprtrdma_defer_cmp,
1015 	TP_PROTO(
1016 		const struct rpcrdma_rep *rep
1017 	),
1018 
1019 	TP_ARGS(rep),
1020 
1021 	TP_STRUCT__entry(
1022 		__field(unsigned int, task_id)
1023 		__field(unsigned int, client_id)
1024 		__field(const void *, rep)
1025 		__field(u32, xid)
1026 	),
1027 
1028 	TP_fast_assign(
1029 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
1030 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
1031 		__entry->rep = rep;
1032 		__entry->xid = be32_to_cpu(rep->rr_xid);
1033 	),
1034 
1035 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1036 		__entry->task_id, __entry->client_id, __entry->xid,
1037 		__entry->rep
1038 	)
1039 );
1040 
1041 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
1042 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
1043 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
1044 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1045 
1046 TRACE_EVENT(xprtrdma_fixup,
1047 	TP_PROTO(
1048 		const struct rpc_rqst *rqst,
1049 		int len,
1050 		int hdrlen
1051 	),
1052 
1053 	TP_ARGS(rqst, len, hdrlen),
1054 
1055 	TP_STRUCT__entry(
1056 		__field(unsigned int, task_id)
1057 		__field(unsigned int, client_id)
1058 		__field(const void *, base)
1059 		__field(int, len)
1060 		__field(int, hdrlen)
1061 	),
1062 
1063 	TP_fast_assign(
1064 		__entry->task_id = rqst->rq_task->tk_pid;
1065 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1066 		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
1067 		__entry->len = len;
1068 		__entry->hdrlen = hdrlen;
1069 	),
1070 
1071 	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
1072 		__entry->task_id, __entry->client_id,
1073 		__entry->base, __entry->len, __entry->hdrlen
1074 	)
1075 );
1076 
1077 TRACE_EVENT(xprtrdma_fixup_pg,
1078 	TP_PROTO(
1079 		const struct rpc_rqst *rqst,
1080 		int pageno,
1081 		const void *pos,
1082 		int len,
1083 		int curlen
1084 	),
1085 
1086 	TP_ARGS(rqst, pageno, pos, len, curlen),
1087 
1088 	TP_STRUCT__entry(
1089 		__field(unsigned int, task_id)
1090 		__field(unsigned int, client_id)
1091 		__field(const void *, pos)
1092 		__field(int, pageno)
1093 		__field(int, len)
1094 		__field(int, curlen)
1095 	),
1096 
1097 	TP_fast_assign(
1098 		__entry->task_id = rqst->rq_task->tk_pid;
1099 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1100 		__entry->pos = pos;
1101 		__entry->pageno = pageno;
1102 		__entry->len = len;
1103 		__entry->curlen = curlen;
1104 	),
1105 
1106 	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
1107 		__entry->task_id, __entry->client_id,
1108 		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
1109 	)
1110 );
1111 
1112 TRACE_EVENT(xprtrdma_decode_seg,
1113 	TP_PROTO(
1114 		u32 handle,
1115 		u32 length,
1116 		u64 offset
1117 	),
1118 
1119 	TP_ARGS(handle, length, offset),
1120 
1121 	TP_STRUCT__entry(
1122 		__field(u32, handle)
1123 		__field(u32, length)
1124 		__field(u64, offset)
1125 	),
1126 
1127 	TP_fast_assign(
1128 		__entry->handle = handle;
1129 		__entry->length = length;
1130 		__entry->offset = offset;
1131 	),
1132 
1133 	TP_printk("%u@0x%016llx:0x%08x",
1134 		__entry->length, (unsigned long long)__entry->offset,
1135 		__entry->handle
1136 	)
1137 );
1138 
1139 /**
1140  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1141  **/
1142 
1143 TRACE_EVENT(xprtrdma_op_allocate,
1144 	TP_PROTO(
1145 		const struct rpc_task *task,
1146 		const struct rpcrdma_req *req
1147 	),
1148 
1149 	TP_ARGS(task, req),
1150 
1151 	TP_STRUCT__entry(
1152 		__field(unsigned int, task_id)
1153 		__field(unsigned int, client_id)
1154 		__field(const void *, req)
1155 		__field(size_t, callsize)
1156 		__field(size_t, rcvsize)
1157 	),
1158 
1159 	TP_fast_assign(
1160 		__entry->task_id = task->tk_pid;
1161 		__entry->client_id = task->tk_client->cl_clid;
1162 		__entry->req = req;
1163 		__entry->callsize = task->tk_rqstp->rq_callsize;
1164 		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
1165 	),
1166 
1167 	TP_printk("task:%u@%u req=%p (%zu, %zu)",
1168 		__entry->task_id, __entry->client_id,
1169 		__entry->req, __entry->callsize, __entry->rcvsize
1170 	)
1171 );
1172 
1173 TRACE_EVENT(xprtrdma_op_free,
1174 	TP_PROTO(
1175 		const struct rpc_task *task,
1176 		const struct rpcrdma_req *req
1177 	),
1178 
1179 	TP_ARGS(task, req),
1180 
1181 	TP_STRUCT__entry(
1182 		__field(unsigned int, task_id)
1183 		__field(unsigned int, client_id)
1184 		__field(const void *, req)
1185 		__field(const void *, rep)
1186 	),
1187 
1188 	TP_fast_assign(
1189 		__entry->task_id = task->tk_pid;
1190 		__entry->client_id = task->tk_client->cl_clid;
1191 		__entry->req = req;
1192 		__entry->rep = req->rl_reply;
1193 	),
1194 
1195 	TP_printk("task:%u@%u req=%p rep=%p",
1196 		__entry->task_id, __entry->client_id,
1197 		__entry->req, __entry->rep
1198 	)
1199 );
1200 
1201 /**
1202  ** Callback events
1203  **/
1204 
1205 TRACE_EVENT(xprtrdma_cb_setup,
1206 	TP_PROTO(
1207 		const struct rpcrdma_xprt *r_xprt,
1208 		unsigned int reqs
1209 	),
1210 
1211 	TP_ARGS(r_xprt, reqs),
1212 
1213 	TP_STRUCT__entry(
1214 		__field(const void *, r_xprt)
1215 		__field(unsigned int, reqs)
1216 		__string(addr, rpcrdma_addrstr(r_xprt))
1217 		__string(port, rpcrdma_portstr(r_xprt))
1218 	),
1219 
1220 	TP_fast_assign(
1221 		__entry->r_xprt = r_xprt;
1222 		__entry->reqs = reqs;
1223 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1224 		__assign_str(port, rpcrdma_portstr(r_xprt));
1225 	),
1226 
1227 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1228 		__get_str(addr), __get_str(port),
1229 		__entry->r_xprt, __entry->reqs
1230 	)
1231 );
1232 
1233 DEFINE_CB_EVENT(xprtrdma_cb_call);
1234 DEFINE_CB_EVENT(xprtrdma_cb_reply);
1235 
/*
 * Fires when a reply buffer could not be returned to the transport's
 * free list and is therefore leaked; captures the owning task and XID
 * so the lost rep can be correlated with the RPC that used it.
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1263 
1264 /**
1265  ** Server-side RPC/RDMA events
1266  **/
1267 
/*
 * Event class for server-side transport lifetime events: records the
 * svc_xprt pointer and the remote peer's presentation address.
 */
DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),

	TP_ARGS(xprt),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);
1289 
/* Instantiate svcrdma_xprt_event as svcrdma_xprt_<name>. */
#define DEFINE_XPRT_EVENT(name)						\
		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
				TP_PROTO(				\
					const struct svc_xprt *xprt	\
				),					\
				TP_ARGS(xprt))

/* Transport lifetime: connection accepted, accept failed, xprt freed. */
DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);
1300 
/* Export the RPC/RDMA procedure values so userspace trace tools can
 * resolve the symbolic names printed by show_rpcrdma_proc(). */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA procedure number to its protocol name. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1314 
/*
 * Fires after a received RPC/RDMA transport header has been parsed.
 * @p points at the start of the header; @hdrlen is its decoded length.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		/* Wire order is xid, vers, credits, proc — note that
		 * credits precedes proc even though the __entry fields
		 * are declared in a different order above. */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1343 
/*
 * Fires when a received message is too short to contain a complete
 * RPC/RDMA transport header; only the bogus length is available.
 */
TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);
1361 
/*
 * Event class for malformed or unsupported incoming requests.  @p
 * points at a complete transport header; the four fixed fields are
 * decoded in wire order (xid, vers, credits, proc).
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	/* proc is printed numerically here (unlike svcrdma_decode_rqst)
	 * because a bad request may carry an out-of-range value. */
	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1386 
/* Instantiate svcrdma_badreq_event as svcrdma_decode_<name>. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
				TP_PROTO(				\
					__be32 *p			\
				),					\
				TP_ARGS(p))

/* Reasons a request was rejected during header decoding. */
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1398 
/*
 * Event class for individual RDMA segments (handle/length/offset
 * triples) as they are encoded into a reply.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	/* Conventional segment notation: length@offset:handle */
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1425 
/* Instantiate svcrdma_segment_event as svcrdma_encode_<name>. */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

/* rseg: Read chunk segment; wseg: Write chunk segment. */
DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);
1437 
/*
 * Event class for whole-chunk encoding events; records only the total
 * chunk length.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);
1457 
/* Instantiate svcrdma_chunk_event as svcrdma_encode_<name>. */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

/* pzr: position-zero Read chunk; write/reply: Write and Reply chunks. */
DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);
1468 
/*
 * Fires when a Read chunk is encoded; unlike the other chunk events it
 * also records the chunk's XDR position.
 */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1491 
/*
 * Event class for sending RDMA_ERROR replies; records the XID of the
 * failing request (converted from wire byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1511 
/* Instantiate svcrdma_error_event as svcrdma_err_<name>. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

/* vers: ERR_VERS (version mismatch); chunk: ERR_CHUNK (bad chunk). */
DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1521 
1522 /**
1523  ** Server-side RDMA API events
1524  **/
1525 
1526 TRACE_EVENT(svcrdma_dma_map_page,
1527 	TP_PROTO(
1528 		const struct svcxprt_rdma *rdma,
1529 		const void *page
1530 	),
1531 
1532 	TP_ARGS(rdma, page),
1533 
1534 	TP_STRUCT__entry(
1535 		__field(const void *, page);
1536 		__string(device, rdma->sc_cm_id->device->name)
1537 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1538 	),
1539 
1540 	TP_fast_assign(
1541 		__entry->page = page;
1542 		__assign_str(device, rdma->sc_cm_id->device->name);
1543 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1544 	),
1545 
1546 	TP_printk("addr=%s device=%s page=%p",
1547 		__get_str(addr), __get_str(device), __entry->page
1548 	)
1549 );
1550 
/*
 * Fires when mapping an R/W context fails; records the error status,
 * the RDMA device name, and the remote peer address.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);
1575 
1576 TRACE_EVENT(svcrdma_send_failed,
1577 	TP_PROTO(
1578 		const struct svc_rqst *rqst,
1579 		int status
1580 	),
1581 
1582 	TP_ARGS(rqst, status),
1583 
1584 	TP_STRUCT__entry(
1585 		__field(int, status)
1586 		__field(u32, xid)
1587 		__field(const void *, xprt)
1588 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1589 	),
1590 
1591 	TP_fast_assign(
1592 		__entry->status = status;
1593 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1594 		__entry->xprt = rqst->rq_xprt;
1595 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1596 	),
1597 
1598 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1599 		__entry->xprt, __get_str(addr),
1600 		__entry->xid, __entry->status
1601 	)
1602 );
1603 
/*
 * Event class for Send-side completions (Send, RDMA Read, RDMA Write).
 * vendor_err is valid only when the completion status is non-zero, so
 * it is zeroed on success to avoid recording stale hardware state.
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1631 
/* Instantiate svcrdma_sendcomp_event as svcrdma_wc_<name>. */
#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))
1638 
/*
 * Fires when a Send WR is posted; records the cqe, SGE count, the
 * rkey being remotely invalidated (if any), and the post status.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		/* invalidate_rkey is meaningful only for Send-with-
		 * Invalidate; report 0 for a plain Send */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);
1667 
/* Completion of a posted Send WR. */
DEFINE_SENDCOMP_EVENT(send);
1669 
/*
 * Fires when a Receive WR is posted; records the cqe and post status.
 */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);
1692 
/*
 * Fires on Receive completion.  byte_len and vendor_err are mutually
 * exclusive: byte_len is defined only on success, vendor_err only on
 * failure, so the unused one is recorded as zero.
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1725 
/*
 * Fires when an RDMA Read/Write chain is posted; records the cqe, the
 * number of send-queue entries consumed, and the post status.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);
1751 
/* Completion of RDMA Read and RDMA Write chains. */
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
1754 
1755 TRACE_EVENT(svcrdma_cm_event,
1756 	TP_PROTO(
1757 		const struct rdma_cm_event *event,
1758 		const struct sockaddr *sap
1759 	),
1760 
1761 	TP_ARGS(event, sap),
1762 
1763 	TP_STRUCT__entry(
1764 		__field(unsigned int, event)
1765 		__field(int, status)
1766 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1767 	),
1768 
1769 	TP_fast_assign(
1770 		__entry->event = event->event;
1771 		__entry->status = event->status;
1772 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1773 			 "%pISpc", sap);
1774 	),
1775 
1776 	TP_printk("addr=%s event=%s (%u/%d)",
1777 		__entry->addr,
1778 		rdma_show_cm_event(__entry->event),
1779 		__entry->event, __entry->status
1780 	)
1781 );
1782 
1783 TRACE_EVENT(svcrdma_qp_error,
1784 	TP_PROTO(
1785 		const struct ib_event *event,
1786 		const struct sockaddr *sap
1787 	),
1788 
1789 	TP_ARGS(event, sap),
1790 
1791 	TP_STRUCT__entry(
1792 		__field(unsigned int, event)
1793 		__string(device, event->device->name)
1794 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1795 	),
1796 
1797 	TP_fast_assign(
1798 		__entry->event = event->event;
1799 		__assign_str(device, event->device->name);
1800 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1801 			 "%pISpc", sap);
1802 	),
1803 
1804 	TP_printk("addr=%s dev=%s event=%s (%u)",
1805 		__entry->addr, __get_str(device),
1806 		rdma_show_ib_event(__entry->event), __entry->event
1807 	)
1808 );
1809 
/*
 * Event class for send-queue accounting: snapshots the available SQ
 * credits against the configured SQ depth for a transport.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		/* sc_sq_avail is shared state; atomic_read() takes an
		 * unsynchronized snapshot for tracing purposes only */
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1833 
/* Instantiate svcrdma_sendqueue_event as svcrdma_sq_<name>. */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

/* full: SQ exhausted, send must wait; retry: waiter retrying a post. */
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1843 
1844 #endif /* _TRACE_RPCRDMA_H */
1845 
1846 #include <trace/define_trace.h>
1847