Searched refs:event (Results 1 – 14 of 14) sorted by relevance

/kernel/trace/
ring_buffer.c:137 rb_event_length(struct ring_buffer_event *event) in rb_event_length() argument
141 switch (event->type) { in rb_event_length()
153 if (event->len) in rb_event_length()
154 length = event->len << RB_ALIGNMENT_SHIFT; in rb_event_length()
156 length = event->array[0]; in rb_event_length()
169 unsigned ring_buffer_event_length(struct ring_buffer_event *event) in ring_buffer_event_length() argument
171 unsigned length = rb_event_length(event); in ring_buffer_event_length()
172 if (event->type != RINGBUF_TYPE_DATA) in ring_buffer_event_length()
175 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
176 length -= sizeof(event->array[0]); in ring_buffer_event_length()
[all …]
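
The hits above show how a data event's length is encoded: a small payload stores a word count in the header's len field (scaled by the buffer alignment), while a larger payload puts its byte count in array[0], which ring_buffer_event_length() then strips back off. A minimal sketch of that decode, assuming the 2.6.2x-era event layout; the bit widths and RB_* constants below are assumptions from that era, not taken from this listing:

        /*
         * Sketch only: layout and constants are assumed, not quoted from
         * this tree.
         */
        #define RB_ALIGNMENT_SHIFT      2
        #define RB_MAX_SMALL_DATA       28      /* largest len-field payload */

        struct ring_buffer_event {
                u32     type:2, len:3, time_delta:27;
                u32     array[];
        };

        static unsigned rb_data_length(struct ring_buffer_event *event)
        {
                if (event->len)         /* small: length lives in the header */
                        return event->len << RB_ALIGNMENT_SHIFT;
                return event->array[0]; /* large: byte count stored in array[0] */
        }

In the large case the stored count still covers the array[0] word itself, which is why ring_buffer_event_length() above subtracts sizeof(event->array[0]) before reporting the payload size to callers.
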
trace_boot.c:129 struct ring_buffer_event *event; in trace_boot_call() local
143 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in trace_boot_call()
145 if (!event) in trace_boot_call()
147 entry = ring_buffer_event_data(event); in trace_boot_call()
151 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in trace_boot_call()
161 struct ring_buffer_event *event; in trace_boot_ret() local
172 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in trace_boot_ret()
174 if (!event) in trace_boot_ret()
176 entry = ring_buffer_event_data(event); in trace_boot_ret()
180 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in trace_boot_ret()
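
trace_boot.c, and the trace_power.c, trace_mmiotrace.c, trace.c, trace_hw_branches.c and trace_branch.c hits below, all follow the same three-step idiom: reserve space in the ring buffer, fill the returned event's payload, then commit. A minimal sketch of that pattern, assuming the pre-2.6.30 lock_reserve/unlock_commit signatures shown in these snippets; struct my_entry and my_trace_event() are hypothetical names:

        /* Hypothetical payload type, stands in for the tracers' entry structs. */
        struct my_entry {
                unsigned long   data;
        };

        static void my_trace_event(struct trace_array *tr, unsigned long data)
        {
                struct ring_buffer_event *event;
                struct my_entry *entry;
                unsigned long irq_flags;

                /* Reserve room for one entry; NULL means the buffer refused. */
                event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                                 &irq_flags);
                if (!event)
                        return;

                /* Fill the payload that lives inside the reserved event. */
                entry = ring_buffer_event_data(event);
                entry->data = data;

                /* Publish the event and restore interrupts. */
                ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        }
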
trace_power.c:114 struct ring_buffer_event *event; in trace_power_end() local
127 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in trace_power_end()
129 if (!event) in trace_power_end()
131 entry = ring_buffer_event_data(event); in trace_power_end()
135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in trace_power_end()
147 struct ring_buffer_event *event; in trace_power_mark() local
164 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in trace_power_mark()
166 if (!event) in trace_power_mark()
168 entry = ring_buffer_event_data(event); in trace_power_mark()
172 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in trace_power_mark()
trace_mmiotrace.c:309 struct ring_buffer_event *event; in __trace_mmiotrace_rw() local
313 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in __trace_mmiotrace_rw()
315 if (!event) { in __trace_mmiotrace_rw()
319 entry = ring_buffer_event_data(event); in __trace_mmiotrace_rw()
323 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in __trace_mmiotrace_rw()
339 struct ring_buffer_event *event; in __trace_mmiotrace_map() local
343 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in __trace_mmiotrace_map()
345 if (!event) { in __trace_mmiotrace_map()
349 entry = ring_buffer_event_data(event); in __trace_mmiotrace_map()
353 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in __trace_mmiotrace_map()
trace.c:884 struct ring_buffer_event *event; local
892 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
894 if (!event)
896 entry = ring_buffer_event_data(event);
901 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
911 struct ring_buffer_event *event; local
918 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
920 if (!event)
922 entry = ring_buffer_event_data(event);
926 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
[all …]
trace_hw_branches.c:108 struct ring_buffer_event *event; in trace_hw_branch() local
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); in trace_hw_branch()
113 if (!event) in trace_hw_branch()
115 entry = ring_buffer_event_data(event); in trace_hw_branch()
121 ring_buffer_unlock_commit(tr->buffer, event, irq); in trace_hw_branch()
trace_branch.c:29 struct ring_buffer_event *event; in probe_likely_condition() local
50 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in probe_likely_condition()
52 if (!event) in probe_likely_condition()
56 entry = ring_buffer_event_data(event); in probe_likely_condition()
73 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); in probe_likely_condition()
trace_functions_graph.c:209 struct ring_buffer_event *event; in trace_branch_is_leaf() local
217 event = ring_buffer_iter_peek(ring_iter, NULL); in trace_branch_is_leaf()
219 if (!event) in trace_branch_is_leaf()
222 next = ring_buffer_event_data(event); in trace_branch_is_leaf()
346 struct ring_buffer_event *event; in print_graph_entry_leaf() local
352 event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); in print_graph_entry_leaf()
353 ret_entry = ring_buffer_event_data(event); in print_graph_entry_leaf()
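
The graph tracer reads through an iterator rather than consuming: ring_buffer_iter_peek() looks at the next event without advancing, and ring_buffer_read() returns it and moves the iterator forward. A minimal sketch of that peek-then-read flow, assuming "iter" is an already-initialised iterator; the NULL second argument skips the timestamp, as in the snippets:

        static struct trace_entry *peek_then_read(struct ring_buffer_iter *iter)
        {
                struct ring_buffer_event *event;

                /* Peek: inspect the next event without advancing the iterator. */
                event = ring_buffer_iter_peek(iter, NULL);
                if (!event)
                        return NULL;

                /* Read: advance past it now that we know we want it. */
                ring_buffer_read(iter, NULL);
                return ring_buffer_event_data(event);
        }
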
trace_selftest.c:24 struct ring_buffer_event *event; in trace_test_buffer_cpu() local
28 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { in trace_test_buffer_cpu()
29 entry = ring_buffer_event_data(event); in trace_test_buffer_cpu()
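
The selftest uses the destructive read path instead: ring_buffer_consume() removes each event from the per-CPU buffer as it returns it. A minimal sketch of that draining loop, following the three-argument call seen in the snippet; the counting is a hypothetical stand-in for the selftest's checks:

        static int count_cpu_entries(struct trace_array *tr, int cpu)
        {
                struct ring_buffer_event *event;
                struct trace_entry *entry;
                int count = 0;

                /* Each call removes and returns the next event on this CPU. */
                while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                        entry = ring_buffer_event_data(event);
                        if (entry)
                                count++;
                }
                return count;
        }
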
/kernel/
rtmutex-tester.c:31 int event; member
67 td->event = atomic_add_return(1, &rttest_event); in handle_op()
102 td->event = atomic_add_return(1, &rttest_event); in handle_op()
104 td->event = atomic_add_return(1, &rttest_event); in handle_op()
115 td->event = atomic_add_return(1, &rttest_event); in handle_op()
117 td->event = atomic_add_return(1, &rttest_event); in handle_op()
126 td->event = atomic_add_return(1, &rttest_event); in handle_op()
128 td->event = atomic_add_return(1, &rttest_event); in handle_op()
190 td->event = atomic_add_return(1, &rttest_event); in schedule_rt_mutex_test()
211 td->event = atomic_add_return(1, &rttest_event); in schedule_rt_mutex_test()
[all …]
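
Here "event" is not a trace record but a sequence counter: every test step bumps the global rttest_event atomically and stores the returned value in the per-thread data, so the ordering of operations can be checked afterwards. A minimal sketch of that bookkeeping, with only the names visible in the snippet taken as given and the rest assumed:

        static atomic_t rttest_event;   /* global, monotonically increasing */

        struct test_thread_data {
                int     event;          /* sequence number of this thread's last step */
        };

        static void record_step(struct test_thread_data *td)
        {
                /* atomic_add_return() hands each step a unique, ordered number. */
                td->event = atomic_add_return(1, &rttest_event);
        }
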
posix-timers.c:420 static struct pid *good_sigevent(sigevent_t * event) in good_sigevent() argument
424 if ((event->sigev_notify & SIGEV_THREAD_ID ) && in good_sigevent()
425 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || in good_sigevent()
427 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) in good_sigevent()
430 if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && in good_sigevent()
431 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) in good_sigevent()
486 sigevent_t event; in SYSCALL_DEFINE3() local
534 if (copy_from_user(&event, timer_event_spec, sizeof (event))) { in SYSCALL_DEFINE3()
539 new_timer->it_pid = get_pid(good_sigevent(&event)); in SYSCALL_DEFINE3()
546 event.sigev_notify = SIGEV_SIGNAL; in SYSCALL_DEFINE3()
[all …]
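
good_sigevent() validates the sigevent a caller hands to timer_create(2): a SIGEV_THREAD_ID request must name a live thread and be combined with SIGEV_SIGNAL, and anything other than SIGEV_NONE needs a signal number between 1 and SIGRTMAX. A userspace sketch of a sigevent that passes this check (an illustrative example, not code from this tree; link with -lrt on older glibc):

        #include <signal.h>
        #include <time.h>
        #include <stdio.h>

        int main(void)
        {
                struct sigevent sev = { 0 };
                timer_t timerid;

                sev.sigev_notify = SIGEV_SIGNAL;        /* plain signal notification */
                sev.sigev_signo  = SIGRTMIN;            /* must be > 0 and <= SIGRTMAX */
                sev.sigev_value.sival_ptr = &timerid;

                if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
                        perror("timer_create");
                        return 1;
                }
                return 0;
        }
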
compat.c:545 struct sigevent __user *event = NULL; in compat_sys_timer_create() local
550 event = compat_alloc_user_space(sizeof(*event)); in compat_sys_timer_create()
552 copy_to_user(event, &kevent, sizeof(*event))) in compat_sys_timer_create()
556 return sys_timer_create(which_clock, event, created_timer_id); in compat_sys_timer_create()
712 int get_compat_sigevent(struct sigevent *event, in get_compat_sigevent() argument
715 memset(event, 0, sizeof(*event)); in get_compat_sigevent()
717 __get_user(event->sigev_value.sival_int, in get_compat_sigevent()
719 __get_user(event->sigev_signo, &u_event->sigev_signo) || in get_compat_sigevent()
720 __get_user(event->sigev_notify, &u_event->sigev_notify) || in get_compat_sigevent()
721 __get_user(event->sigev_notify_thread_id, in get_compat_sigevent()
softlockup.c:48 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) in softlock_panic() argument
/kernel/power/
Kconfig:44 This enables code to save the last PM event point across
57 bool "Suspend/resume event tracing"
63 This enables some cheesy code to save the last PM event point in the