/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Greybus driver and device API
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM greybus

#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GREYBUS_H

#include <linux/tracepoint.h>

struct gb_message;
struct gb_operation;
struct gb_connection;
struct gb_bundle;
struct gb_host_device;

DECLARE_EVENT_CLASS(gb_message,

	TP_PROTO(struct gb_message *message),

	TP_ARGS(message),

	TP_STRUCT__entry(
		__field(u16, size)
		__field(u16, operation_id)
		__field(u8, type)
		__field(u8, result)
	),

	TP_fast_assign(
		__entry->size = le16_to_cpu(message->header->size);
		__entry->operation_id =
			le16_to_cpu(message->header->operation_id);
		__entry->type = message->header->type;
		__entry->result = message->header->result;
	),

	TP_printk("size=%u operation_id=0x%04x type=0x%02x result=0x%02x",
		  __entry->size, __entry->operation_id,
		  __entry->type, __entry->result)
);

#define DEFINE_MESSAGE_EVENT(name)					\
		DEFINE_EVENT(gb_message, name,				\
				TP_PROTO(struct gb_message *message),	\
				TP_ARGS(message))
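
/*
 * Each DEFINE_MESSAGE_EVENT(name) below expands to DEFINE_EVENT() and
 * therefore provides a trace_<name>(struct gb_message *) helper that is
 * called at the points documented with each event.  An illustrative
 * call site (not taken verbatim from any driver):
 *
 *	trace_gb_message_send(message);
 */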

/*
 * Occurs immediately before calling a host device's message_send()
 * method.
 */
DEFINE_MESSAGE_EVENT(gb_message_send);

/*
 * Occurs after an incoming request message has been received.
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_request);

/*
 * Occurs after an incoming response message has been received,
 * after its matching request has been found.
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_response);

/*
 * Occurs after an operation has been canceled, possibly before the
 * cancellation is complete.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);

/*
 * Occurs when an incoming request is canceled; if the response has
 * been queued for sending, this occurs after it is sent.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);

/*
 * Occurs in the host driver message_send() function just prior to
 * handing off the data to be processed by hardware.
 */
DEFINE_MESSAGE_EVENT(gb_message_submit);
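
/*
 * Illustrative sketch (example_hw_queue() and the surrounding variables
 * are hypothetical, not part of any real driver): a host driver's
 * message_send() implementation would emit this event just before
 * queueing the message buffer to its hardware:
 *
 *	trace_gb_message_submit(message);
 *	ret = example_hw_queue(hd, cport_id, message);
 */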

#undef DEFINE_MESSAGE_EVENT

DECLARE_EVENT_CLASS(gb_operation,

	TP_PROTO(struct gb_operation *operation),

	TP_ARGS(operation),

	TP_STRUCT__entry(
		__field(u16, cport_id)	/* CPort of HD side of connection */
		__field(u16, id)	/* Operation ID */
		__field(u8, type)
		__field(unsigned long, flags)
		__field(int, active)
		__field(int, waiters)
		__field(int, errno)
	),

	TP_fast_assign(
		__entry->cport_id = operation->connection->hd_cport_id;
		__entry->id = operation->id;
		__entry->type = operation->type;
		__entry->flags = operation->flags;
		__entry->active = operation->active;
		__entry->waiters = atomic_read(&operation->waiters);
		__entry->errno = operation->errno;
	),

	TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
		  __entry->id, __entry->type, __entry->cport_id, __entry->flags,
		  __entry->active, __entry->waiters, __entry->errno)
);

#define DEFINE_OPERATION_EVENT(name)					\
		DEFINE_EVENT(gb_operation, name,			\
				TP_PROTO(struct gb_operation *operation), \
				TP_ARGS(operation))

/*
 * Occurs after a new operation for an outgoing request has been
 * successfully created.
 */
DEFINE_OPERATION_EVENT(gb_operation_create);

/*
 * Occurs after a new core operation has been created.
 */
DEFINE_OPERATION_EVENT(gb_operation_create_core);

/*
 * Occurs after a new operation for an incoming request has been
 * successfully created and initialized.
 */
DEFINE_OPERATION_EVENT(gb_operation_create_incoming);

/*
 * Occurs when the last reference to an operation has been dropped,
 * prior to freeing resources.
 */
DEFINE_OPERATION_EVENT(gb_operation_destroy);

/*
 * Occurs when an operation has been marked active, after updating
 * its active count.
 */
DEFINE_OPERATION_EVENT(gb_operation_get_active);

/*
 * Occurs when an active operation is released, before its active
 * count is updated.
 */
DEFINE_OPERATION_EVENT(gb_operation_put_active);
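
/*
 * Illustrative pairing (a sketch of the ordering documented above, not
 * a verbatim excerpt from operation.c): the active count is bumped and
 * then traced when an operation is put in flight, and traced again just
 * before the count is dropped once the operation completes:
 *
 *	operation->active++;
 *	trace_gb_operation_get_active(operation);
 *	...
 *	trace_gb_operation_put_active(operation);
 *	operation->active--;
 */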

#undef DEFINE_OPERATION_EVENT

DECLARE_EVENT_CLASS(gb_connection,

	TP_PROTO(struct gb_connection *connection),

	TP_ARGS(connection),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, bundle_id)
		/* name contains "hd_cport_id/intf_id:cport_id" */
		__dynamic_array(char, name, sizeof(connection->name))
		__field(enum gb_connection_state, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		__entry->hd_bus_id = connection->hd->bus_id;
		__entry->bundle_id = connection->bundle ?
				connection->bundle->id : BUNDLE_ID_NONE;
		memcpy(__get_str(name), connection->name,
					sizeof(connection->name));
		__entry->state = connection->state;
		__entry->flags = connection->flags;
	),

	TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
		  __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
		  (unsigned int)__entry->state, __entry->flags)
);

#define DEFINE_CONNECTION_EVENT(name)					\
		DEFINE_EVENT(gb_connection, name,			\
				TP_PROTO(struct gb_connection *connection), \
				TP_ARGS(connection))

/*
 * Occurs after a new connection is successfully created.
 */
DEFINE_CONNECTION_EVENT(gb_connection_create);

/*
 * Occurs when the last reference to a connection has been dropped,
 * before its resources are freed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_release);

/*
 * Occurs when a new reference to a connection is added, currently
 * only when a message over the connection is received.
 */
DEFINE_CONNECTION_EVENT(gb_connection_get);

/*
 * Occurs when a reference to a connection is dropped, after a
 * received message has been handled, or when the connection is
 * destroyed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_put);
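
/*
 * Illustrative pairing (a sketch of the behaviour documented above, not
 * a verbatim excerpt from the core): a reference is taken while an
 * incoming message is dispatched and dropped once it has been handled:
 *
 *	trace_gb_connection_get(connection);
 *	...handle the received message...
 *	trace_gb_connection_put(connection);
 */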

/*
 * Occurs when a request to enable a connection is made, either for
 * transmit only, or for both transmit and receive.
 */
DEFINE_CONNECTION_EVENT(gb_connection_enable);

/*
 * Occurs when a request to disable a connection is made, either for
 * receive only, or for both transmit and receive.  Also occurs when
 * a request to forcefully disable a connection is made.
 */
DEFINE_CONNECTION_EVENT(gb_connection_disable);

#undef DEFINE_CONNECTION_EVENT

DECLARE_EVENT_CLASS(gb_bundle,

	TP_PROTO(struct gb_bundle *bundle),

	TP_ARGS(bundle),

	TP_STRUCT__entry(
		__field(u8, intf_id)
		__field(u8, id)
		__field(u8, class)
		__field(size_t, num_cports)
	),

	TP_fast_assign(
		__entry->intf_id = bundle->intf->interface_id;
		__entry->id = bundle->id;
		__entry->class = bundle->class;
		__entry->num_cports = bundle->num_cports;
	),

	TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
		  __entry->intf_id, __entry->id, __entry->class,
		  __entry->num_cports)
);

#define DEFINE_BUNDLE_EVENT(name)					\
		DEFINE_EVENT(gb_bundle, name,				\
				TP_PROTO(struct gb_bundle *bundle),	\
				TP_ARGS(bundle))

/*
 * Occurs after a new bundle is successfully created.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_create);

/*
 * Occurs when the last reference to a bundle has been dropped,
 * before its resources are freed.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_release);

/*
 * Occurs when a bundle is added to an interface when the interface
 * is enabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_add);

/*
 * Occurs when a registered bundle gets destroyed, normally at the
 * time an interface is disabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_destroy);

#undef DEFINE_BUNDLE_EVENT

DECLARE_EVENT_CLASS(gb_interface,

	TP_PROTO(struct gb_interface *intf),

	TP_ARGS(intf),

	TP_STRUCT__entry(
		__field(u8, module_id)
		__field(u8, id)		/* Interface id */
		__field(u8, device_id)
		__field(int, disconnected)	/* bool */
		__field(int, ejected)		/* bool */
		__field(int, active)		/* bool */
		__field(int, enabled)		/* bool */
		__field(int, mode_switch)	/* bool */
	),

	TP_fast_assign(
		__entry->module_id = intf->module->module_id;
		__entry->id = intf->interface_id;
		__entry->device_id = intf->device_id;
		__entry->disconnected = intf->disconnected;
		__entry->ejected = intf->ejected;
		__entry->active = intf->active;
		__entry->enabled = intf->enabled;
		__entry->mode_switch = intf->mode_switch;
	),

	TP_printk("intf_id=%u device_id=%u module_id=%u D=%d J=%d A=%d E=%d M=%d",
		__entry->id, __entry->device_id, __entry->module_id,
		__entry->disconnected, __entry->ejected, __entry->active,
		__entry->enabled, __entry->mode_switch)
);

#define DEFINE_INTERFACE_EVENT(name)					\
		DEFINE_EVENT(gb_interface, name,			\
				TP_PROTO(struct gb_interface *intf),	\
				TP_ARGS(intf))

/*
 * Occurs after a new interface is successfully created.
 */
DEFINE_INTERFACE_EVENT(gb_interface_create);

/*
 * Occurs after the last reference to an interface has been dropped.
 */
DEFINE_INTERFACE_EVENT(gb_interface_release);

/*
 * Occurs after an interface has been registered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_add);

/*
 * Occurs when a registered interface gets deregistered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_del);

/*
 * Occurs when a registered interface has been successfully
 * activated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_activate);

/*
 * Occurs when an activated interface is being deactivated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_deactivate);

/*
 * Occurs when an interface has been successfully enabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_enable);

/*
 * Occurs when an enabled interface is being disabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_disable);

#undef DEFINE_INTERFACE_EVENT

DECLARE_EVENT_CLASS(gb_module,

	TP_PROTO(struct gb_module *module),

	TP_ARGS(module),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, module_id)
		__field(size_t, num_interfaces)
		__field(int, disconnected)	/* bool */
	),

	TP_fast_assign(
		__entry->hd_bus_id = module->hd->bus_id;
		__entry->module_id = module->module_id;
		__entry->num_interfaces = module->num_interfaces;
		__entry->disconnected = module->disconnected;
	),

	TP_printk("hd_bus_id=%d module_id=%u num_interfaces=%zu disconnected=%d",
		__entry->hd_bus_id, __entry->module_id,
		__entry->num_interfaces, __entry->disconnected)
);

#define DEFINE_MODULE_EVENT(name)					\
		DEFINE_EVENT(gb_module, name,				\
				TP_PROTO(struct gb_module *module),	\
				TP_ARGS(module))

/*
 * Occurs after a new module is successfully created, before
 * creating any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_create);

/*
 * Occurs after the last reference to a module has been dropped.
 */
DEFINE_MODULE_EVENT(gb_module_release);

/*
 * Occurs after a module is successfully created, before registering
 * any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_add);

/*
 * Occurs when a module is deleted, before deregistering its
 * interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_del);

#undef DEFINE_MODULE_EVENT

DECLARE_EVENT_CLASS(gb_host_device,

	TP_PROTO(struct gb_host_device *hd),

	TP_ARGS(hd),

	TP_STRUCT__entry(
		__field(int, bus_id)
		__field(size_t, num_cports)
		__field(size_t, buffer_size_max)
	),

	TP_fast_assign(
		__entry->bus_id = hd->bus_id;
		__entry->num_cports = hd->num_cports;
		__entry->buffer_size_max = hd->buffer_size_max;
	),

	TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
		__entry->bus_id, __entry->num_cports,
		__entry->buffer_size_max)
);

#define DEFINE_HD_EVENT(name)						\
		DEFINE_EVENT(gb_host_device, name,			\
				TP_PROTO(struct gb_host_device *hd),	\
				TP_ARGS(hd))

/*
 * Occurs after a new host device is successfully created, before
 * its SVC has been set up.
 */
DEFINE_HD_EVENT(gb_hd_create);

/*
 * Occurs after the last reference to a host device has been
 * dropped.
 */
DEFINE_HD_EVENT(gb_hd_release);

/*
 * Occurs after a new host device has been added, after the
 * connection to its SVC has been enabled.
 */
DEFINE_HD_EVENT(gb_hd_add);

/*
 * Occurs when a host device is being disconnected from the AP USB
 * host controller.
 */
DEFINE_HD_EVENT(gb_hd_del);

/*
 * Occurs when a host device has passed received data to the Greybus
 * core, after it has been determined it is destined for a valid
 * CPort.
 */
DEFINE_HD_EVENT(gb_hd_in);
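
/*
 * Illustrative note (hedged; the exact emission point lives in the
 * Greybus core, not in host drivers): a host driver typically passes
 * received data to the core via a call along the lines of
 *
 *	greybus_data_rcvd(hd, cport_id, data, length);
 *
 * and this event fires once the data has been matched to a valid CPort.
 */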

#undef DEFINE_HD_EVENT

#endif /* _TRACE_GREYBUS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

/*
 * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
 */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE greybus_trace
#include <trace/define_trace.h>
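
/*
 * Illustrative usage (a sketch of the standard kernel tracepoint
 * convention, not copied from a specific greybus source file): exactly
 * one .c file instantiates these tracepoints by defining
 * CREATE_TRACE_POINTS before including this header:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "greybus_trace.h"
 */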