/*
 * Greybus driver and device API
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM greybus
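
/*
 * Note: with TRACE_SYSTEM set to "greybus", every event defined below is
 * exposed under <tracefs>/events/greybus/ at run time.  As an illustrative
 * example (the mount point is typically /sys/kernel/tracing, or the legacy
 * /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/greybus/gb_message_send/enable
 *	cat /sys/kernel/tracing/trace
 */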

#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GREYBUS_H

#include <linux/tracepoint.h>

struct gb_message;
struct gb_operation;
struct gb_connection;
struct gb_bundle;
struct gb_interface;
struct gb_module;
struct gb_host_device;

DECLARE_EVENT_CLASS(gb_message,

	TP_PROTO(struct gb_message *message),

	TP_ARGS(message),

	TP_STRUCT__entry(
		__field(u16, size)
		__field(u16, operation_id)
		__field(u8, type)
		__field(u8, result)
	),

	TP_fast_assign(
		__entry->size = le16_to_cpu(message->header->size);
		__entry->operation_id =
			le16_to_cpu(message->header->operation_id);
		__entry->type = message->header->type;
		__entry->result = message->header->result;
	),

	TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
		  __entry->size, __entry->operation_id,
		  __entry->type, __entry->result)
);

#define DEFINE_MESSAGE_EVENT(name)					\
		DEFINE_EVENT(gb_message, name,				\
				TP_PROTO(struct gb_message *message),	\
				TP_ARGS(message))

/*
 * Occurs immediately before calling a host device's message_send()
 * method.
 */
DEFINE_MESSAGE_EVENT(gb_message_send);

/*
 * Occurs after an incoming request message has been received
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_request);

/*
 * Occurs after an incoming response message has been received,
 * after its matching request has been found.
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_response);

/*
 * Occurs after an operation has been canceled, possibly before the
 * cancellation is complete.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);

/*
 * Occurs when an incoming request is cancelled; if the response has
 * been queued for sending, this occurs after it is sent.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);

/*
 * Occurs in the host driver message_send() function just prior to
 * handing off the data to be processed by hardware.
 */
DEFINE_MESSAGE_EVENT(gb_message_submit);

#undef DEFINE_MESSAGE_EVENT
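
/*
 * Each DEFINE_EVENT() above generates a trace_<event_name>() helper taking
 * a struct gb_message pointer.  A call site looks roughly like the sketch
 * below; the surrounding variable names (hd, cport_id, gfp) are
 * illustrative assumptions only:
 *
 *	trace_gb_message_send(message);
 *	ret = hd->driver->message_send(hd, cport_id, message, gfp);
 */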

DECLARE_EVENT_CLASS(gb_operation,

	TP_PROTO(struct gb_operation *operation),

	TP_ARGS(operation),

	TP_STRUCT__entry(
		__field(u16, cport_id)	/* CPort of HD side of connection */
		__field(u16, id)	/* Operation ID */
		__field(u8, type)
		__field(unsigned long, flags)
		__field(int, active)
		__field(int, waiters)
		__field(int, errno)
	),

	TP_fast_assign(
		__entry->cport_id = operation->connection->hd_cport_id;
		__entry->id = operation->id;
		__entry->type = operation->type;
		__entry->flags = operation->flags;
		__entry->active = operation->active;
		__entry->waiters = atomic_read(&operation->waiters);
		__entry->errno = operation->errno;
	),

	TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
		  __entry->id, __entry->type, __entry->cport_id, __entry->flags,
		  __entry->active, __entry->waiters, __entry->errno)
);
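
/*
 * For reference, a gb_operation event rendered by the TP_printk() format
 * above appears in the trace buffer roughly as follows (the values shown
 * are hypothetical):
 *
 *	id=0003 type=0x02 cport_id=0001 flags=0x1 active=1 waiters=1 errno=0
 */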

#define DEFINE_OPERATION_EVENT(name)					\
		DEFINE_EVENT(gb_operation, name,			\
				TP_PROTO(struct gb_operation *operation), \
				TP_ARGS(operation))

/*
 * Occurs after a new operation for an outgoing request has been
 * successfully created.
 */
DEFINE_OPERATION_EVENT(gb_operation_create);

/*
 * Occurs after a new core operation has been created.
 */
DEFINE_OPERATION_EVENT(gb_operation_create_core);

/*
 * Occurs after a new operation for an incoming request has been
 * successfully created and initialized.
 */
DEFINE_OPERATION_EVENT(gb_operation_create_incoming);

/*
 * Occurs when the last reference to an operation has been dropped,
 * prior to freeing resources.
 */
DEFINE_OPERATION_EVENT(gb_operation_destroy);

/*
 * Occurs when an operation has been marked active, after updating
 * its active count.
 */
DEFINE_OPERATION_EVENT(gb_operation_get_active);

/*
 * Occurs when an active reference to an operation is dropped, before
 * updating its active count.
 */
DEFINE_OPERATION_EVENT(gb_operation_put_active);

#undef DEFINE_OPERATION_EVENT

DECLARE_EVENT_CLASS(gb_connection,

	TP_PROTO(struct gb_connection *connection),

	TP_ARGS(connection),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, bundle_id)
		/* name contains "hd_cport_id/intf_id:cport_id" */
		__dynamic_array(char, name, sizeof(connection->name))
		__field(enum gb_connection_state, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		__entry->hd_bus_id = connection->hd->bus_id;
		__entry->bundle_id = connection->bundle ?
				connection->bundle->id : BUNDLE_ID_NONE;
		memcpy(__get_str(name), connection->name,
					sizeof(connection->name));
		__entry->state = connection->state;
		__entry->flags = connection->flags;
	),

	TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
		  __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
		  (unsigned int)__entry->state, __entry->flags)
);

#define DEFINE_CONNECTION_EVENT(name)					\
		DEFINE_EVENT(gb_connection, name,			\
				TP_PROTO(struct gb_connection *connection), \
				TP_ARGS(connection))

/*
 * Occurs after a new connection is successfully created.
 */
DEFINE_CONNECTION_EVENT(gb_connection_create);

/*
 * Occurs when the last reference to a connection has been dropped,
 * before its resources are freed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_release);

/*
 * Occurs when a new reference to a connection is added, currently
 * only when a message over the connection is received.
 */
DEFINE_CONNECTION_EVENT(gb_connection_get);

/*
 * Occurs when a reference to a connection is dropped, after a
 * received message is handled, or when the connection is
 * destroyed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_put);

/*
 * Occurs when a request to enable a connection is made, either for
 * transmit only, or for both transmit and receive.
 */
DEFINE_CONNECTION_EVENT(gb_connection_enable);

/*
 * Occurs when a request to disable a connection is made, either for
 * receive only, or for both transmit and receive.  Also occurs when
 * a request to forcefully disable a connection is made.
 */
DEFINE_CONNECTION_EVENT(gb_connection_disable);

#undef DEFINE_CONNECTION_EVENT

DECLARE_EVENT_CLASS(gb_bundle,

	TP_PROTO(struct gb_bundle *bundle),

	TP_ARGS(bundle),

	TP_STRUCT__entry(
		__field(u8, intf_id)
		__field(u8, id)
		__field(u8, class)
		__field(size_t, num_cports)
	),

	TP_fast_assign(
		__entry->intf_id = bundle->intf->interface_id;
		__entry->id = bundle->id;
		__entry->class = bundle->class;
		__entry->num_cports = bundle->num_cports;
	),

	TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
		  __entry->intf_id, __entry->id, __entry->class,
		  __entry->num_cports)
);

#define DEFINE_BUNDLE_EVENT(name)					\
		DEFINE_EVENT(gb_bundle, name,			\
				TP_PROTO(struct gb_bundle *bundle), \
				TP_ARGS(bundle))

/*
 * Occurs after a new bundle is successfully created.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_create);

/*
 * Occurs when the last reference to a bundle has been dropped,
 * before its resources are freed.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_release);

/*
 * Occurs when a bundle is added to an interface when the interface
 * is enabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_add);

/*
 * Occurs when a registered bundle gets destroyed, normally at the
 * time an interface is disabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_destroy);

#undef DEFINE_BUNDLE_EVENT

DECLARE_EVENT_CLASS(gb_interface,

	TP_PROTO(struct gb_interface *intf),

	TP_ARGS(intf),

	TP_STRUCT__entry(
		__field(u8, module_id)
		__field(u8, id)		/* Interface id */
		__field(u8, device_id)
		__field(int, disconnected)	/* bool */
		__field(int, ejected)		/* bool */
		__field(int, active)		/* bool */
		__field(int, enabled)		/* bool */
		__field(int, mode_switch)	/* bool */
	),

	TP_fast_assign(
		__entry->module_id = intf->module->module_id;
		__entry->id = intf->interface_id;
		__entry->device_id = intf->device_id;
		__entry->disconnected = intf->disconnected;
		__entry->ejected = intf->ejected;
		__entry->active = intf->active;
		__entry->enabled = intf->enabled;
		__entry->mode_switch = intf->mode_switch;
	),

	TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
		__entry->id, __entry->device_id, __entry->module_id,
		__entry->disconnected, __entry->ejected, __entry->active,
		__entry->enabled, __entry->mode_switch)
);

#define DEFINE_INTERFACE_EVENT(name)					\
		DEFINE_EVENT(gb_interface, name,			\
				TP_PROTO(struct gb_interface *intf),	\
				TP_ARGS(intf))

/*
 * Occurs after a new interface is successfully created.
 */
DEFINE_INTERFACE_EVENT(gb_interface_create);

/*
 * Occurs after the last reference to an interface has been dropped.
 */
DEFINE_INTERFACE_EVENT(gb_interface_release);

/*
 * Occurs after an interface has been registered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_add);

/*
 * Occurs when a registered interface gets deregistered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_del);

/*
 * Occurs when a registered interface has been successfully
 * activated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_activate);

/*
 * Occurs when an activated interface is being deactivated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_deactivate);

/*
 * Occurs when an interface has been successfully enabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_enable);

/*
 * Occurs when an enabled interface is being disabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_disable);

#undef DEFINE_INTERFACE_EVENT

DECLARE_EVENT_CLASS(gb_module,

	TP_PROTO(struct gb_module *module),

	TP_ARGS(module),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, module_id)
		__field(size_t, num_interfaces)
		__field(int, disconnected)	/* bool */
	),

	TP_fast_assign(
		__entry->hd_bus_id = module->hd->bus_id;
		__entry->module_id = module->module_id;
		__entry->num_interfaces = module->num_interfaces;
		__entry->disconnected = module->disconnected;
	),

	TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
		__entry->hd_bus_id, __entry->module_id,
		__entry->num_interfaces, __entry->disconnected)
);

#define DEFINE_MODULE_EVENT(name)					\
		DEFINE_EVENT(gb_module, name,				\
				TP_PROTO(struct gb_module *module),	\
				TP_ARGS(module))

/*
 * Occurs after a new module is successfully created, before
 * creating any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_create);

/*
 * Occurs after the last reference to a module has been dropped.
 */
DEFINE_MODULE_EVENT(gb_module_release);

/*
 * Occurs after a module is successfully created, before registering
 * any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_add);

/*
 * Occurs when a module is deleted, before deregistering its
 * interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_del);

#undef DEFINE_MODULE_EVENT

DECLARE_EVENT_CLASS(gb_host_device,

	TP_PROTO(struct gb_host_device *hd),

	TP_ARGS(hd),

	TP_STRUCT__entry(
		__field(int, bus_id)
		__field(size_t, num_cports)
		__field(size_t, buffer_size_max)
	),

	TP_fast_assign(
		__entry->bus_id = hd->bus_id;
		__entry->num_cports = hd->num_cports;
		__entry->buffer_size_max = hd->buffer_size_max;
	),

	TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
		__entry->bus_id, __entry->num_cports,
		__entry->buffer_size_max)
);

#define DEFINE_HD_EVENT(name)						\
		DEFINE_EVENT(gb_host_device, name,			\
				TP_PROTO(struct gb_host_device *hd),	\
				TP_ARGS(hd))

/*
 * Occurs after a new host device is successfully created, before
 * its SVC has been set up.
 */
DEFINE_HD_EVENT(gb_hd_create);

/*
 * Occurs after the last reference to a host device has been
 * dropped.
 */
DEFINE_HD_EVENT(gb_hd_release);

/*
 * Occurs after a new host device has been added, after the
 * connection to its SVC has been enabled.
 */
DEFINE_HD_EVENT(gb_hd_add);

/*
 * Occurs when a host device is being disconnected from the AP USB
 * host controller.
 */
DEFINE_HD_EVENT(gb_hd_del);

/*
 * Occurs when a host device has passed received data to the Greybus
 * core, after it has been determined it is destined for a valid
 * CPort.
 */
DEFINE_HD_EVENT(gb_hd_in);

#undef DEFINE_HD_EVENT

#endif /* _TRACE_GREYBUS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

/*
 * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
 */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE greybus_trace
#include <trace/define_trace.h>
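
/*
 * Usage note (a sketch, not prescriptive): this header is designed to be
 * included more than once (see the TRACE_HEADER_MULTI_READ guard above).
 * Exactly one translation unit in the driver must instantiate the
 * tracepoints by defining CREATE_TRACE_POINTS before including it:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "greybus_trace.h"
 *
 * Because TRACE_INCLUDE_PATH is ".", the directory containing this header
 * must also be on the include path for that translation unit (for example
 * via a ccflags-y or CFLAGS_<obj> -I$(src) addition in the Makefile).
 */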