Lines Matching refs:entry
860 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, argument
865 entry->preempt_count = pc & 0xff;
866 entry->pid = (tsk) ? tsk->pid : 0;
867 entry->tgid = (tsk) ? tsk->tgid : 0;
868 entry->flags =
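
The tracing_generic_entry_update() fragment above (lines 860-868) fills the header every record shares: the low byte of the preempt count, the pid/tgid of the task (or 0 when there is none), and a flags byte that the later TRACE_FLAG_* tests decode. A minimal userspace sketch of that packing, with a made-up struct layout and bit values standing in for the kernel's definitions:

    #include <stdio.h>

    /* Illustrative flag bits; the kernel defines its own TRACE_FLAG_* values. */
    enum {
            SKETCH_FLAG_IRQS_OFF     = 0x01,
            SKETCH_FLAG_NEED_RESCHED = 0x02,
            SKETCH_FLAG_HARDIRQ      = 0x04,
            SKETCH_FLAG_SOFTIRQ      = 0x08,
    };

    /* Simplified stand-in for the common trace_entry header. */
    struct sketch_entry {
            unsigned char type;
            unsigned char flags;
            unsigned char preempt_count;
            int           pid;
            int           tgid;
    };

    /* Mirror of the update pattern: mask the preempt count to one byte and
     * OR together one bit per condition. */
    static void sketch_entry_update(struct sketch_entry *e, int pid, int tgid,
                                    unsigned long pc, int irqs_off,
                                    int in_hardirq, int in_softirq,
                                    int need_resched)
    {
            e->preempt_count = pc & 0xff;
            e->pid  = pid;
            e->tgid = tgid;
            e->flags = (irqs_off     ? SKETCH_FLAG_IRQS_OFF     : 0) |
                       (in_hardirq   ? SKETCH_FLAG_HARDIRQ      : 0) |
                       (in_softirq   ? SKETCH_FLAG_SOFTIRQ      : 0) |
                       (need_resched ? SKETCH_FLAG_NEED_RESCHED : 0);
    }

    int main(void)
    {
            struct sketch_entry e = { 0 };

            sketch_entry_update(&e, 1234, 1234, 0x102, 1, 0, 1, 0);
            printf("pid=%d pc=%d flags=%#x\n", e.pid, e.preempt_count,
                   (unsigned int)e.flags);
            return 0;
    }

Keeping each header field to a byte or two is what lets the latency printer further down recover its columns with simple mask tests.
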
885 struct ftrace_entry *entry; local
892 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
896 entry = ring_buffer_event_data(event);
897 tracing_generic_entry_update(&entry->ent, flags, pc);
898 entry->ent.type = TRACE_FN;
899 entry->ip = ip;
900 entry->parent_ip = parent_ip;
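
The ftrace_entry fragment (lines 885-900) shows the recording pattern that repeats through most of this listing: reserve space in the ring buffer sized to the record (ring_buffer_lock_reserve), take a pointer to its payload (ring_buffer_event_data), fill the shared header with tracing_generic_entry_update() plus the type-specific fields, and then, in lines not matched here, commit the event. A compressed userspace sketch of that flow, with a plain array and hypothetical helpers standing in for the ring-buffer API:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in record: type tag followed by the function-trace payload. */
    struct fn_record {
            unsigned char type;            /* TRACE_FN-style type id         */
            unsigned long ip;              /* address of the traced function */
            unsigned long parent_ip;       /* address of its caller          */
    };

    static unsigned char buf[4096];        /* toy "ring buffer"              */
    static size_t        buf_used;

    /* Reserve 'len' bytes, returning a payload pointer or NULL if full;
     * a crude analogue of the reserve/event_data pair in the listing. */
    static void *sketch_reserve(size_t len)
    {
            if (buf_used + len > sizeof(buf))
                    return NULL;
            void *p = buf + buf_used;
            buf_used += len;
            return p;
    }

    /* The reserve -> fill -> commit shape of the fragment above. */
    static void sketch_trace_function(unsigned long ip, unsigned long parent_ip)
    {
            struct fn_record *entry = sketch_reserve(sizeof(*entry));

            if (!entry)                     /* buffer full: drop the event    */
                    return;
            entry->type      = 1;           /* illustrative type id           */
            entry->ip        = ip;
            entry->parent_ip = parent_ip;
            /* the kernel would now commit the reserved event                 */
    }

    int main(void)
    {
            sketch_trace_function(0xc0100000UL, 0xc0100040UL);
            printf("%zu bytes recorded\n", buf_used);
            return 0;
    }
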
912 struct ftrace_graph_ent_entry *entry; local
918 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
922 entry = ring_buffer_event_data(event);
923 tracing_generic_entry_update(&entry->ent, flags, pc);
924 entry->ent.type = TRACE_GRAPH_ENT;
925 entry->graph_ent = *trace;
936 struct ftrace_graph_ret_entry *entry; local
942 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
946 entry = ring_buffer_event_data(event);
947 tracing_generic_entry_update(&entry->ent, flags, pc);
948 entry->ent.type = TRACE_GRAPH_RET;
949 entry->ret = *trace;
970 struct stack_entry *entry; local
977 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
981 entry = ring_buffer_event_data(event);
982 tracing_generic_entry_update(&entry->ent, flags, pc);
983 entry->ent.type = TRACE_STACK;
985 memset(&entry->caller, 0, sizeof(entry->caller));
990 trace.entries = entry->caller;
1011 struct userstack_entry *entry; local
1018 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1022 entry = ring_buffer_event_data(event);
1023 tracing_generic_entry_update(&entry->ent, flags, pc);
1024 entry->ent.type = TRACE_USER_STACK;
1026 memset(&entry->caller, 0, sizeof(entry->caller));
1031 trace.entries = entry->caller;
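
The stack_entry and userstack_entry fragments (lines 970-990 and 1011-1031) both zero a fixed caller[] array embedded in the record and then point a capture descriptor's entries at it, so the unwinder (not shown in the matched lines) writes return addresses straight into the reserved event. A small sketch of that capture-into-the-record idea, with a fake unwinder and made-up names:

    #include <stdio.h>
    #include <string.h>

    #define SKETCH_STACK_ENTRIES 8          /* fixed depth; the kernel has its own constant */

    /* Record with the caller array embedded, as in the fragments above. */
    struct stack_record {
            unsigned char type;
            unsigned long caller[SKETCH_STACK_ENTRIES];
    };

    /* Descriptor telling the "unwinder" where to store entries. */
    struct stack_capture {
            unsigned int   max_entries;
            unsigned int   nr_entries;
            unsigned long *entries;
    };

    /* Fake unwinder: real code would walk the stack frames. */
    static void sketch_save_stack(struct stack_capture *c)
    {
            for (c->nr_entries = 0; c->nr_entries < 3 &&
                 c->nr_entries < c->max_entries; c->nr_entries++)
                    c->entries[c->nr_entries] = 0x1000 + 0x10 * c->nr_entries;
    }

    int main(void)
    {
            struct stack_record  entry;
            struct stack_capture trace;

            /* zero the array first, then let the unwinder fill it in place */
            memset(&entry.caller, 0, sizeof(entry.caller));
            trace.max_entries = SKETCH_STACK_ENTRIES;
            trace.entries     = entry.caller;
            sketch_save_stack(&trace);

            for (unsigned int i = 0; i < trace.nr_entries; i++)
                    printf("caller[%u] = %#lx\n", i, entry.caller[i]);
            return 0;
    }
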
1053 struct special_entry *entry; local
1056 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1060 entry = ring_buffer_event_data(event);
1061 tracing_generic_entry_update(&entry->ent, 0, pc);
1062 entry->ent.type = TRACE_SPECIAL;
1063 entry->arg1 = arg1;
1064 entry->arg2 = arg2;
1065 entry->arg3 = arg3;
1088 struct ctx_switch_entry *entry; local
1091 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1095 entry = ring_buffer_event_data(event);
1096 tracing_generic_entry_update(&entry->ent, flags, pc);
1097 entry->ent.type = TRACE_CTX;
1098 entry->prev_pid = prev->pid;
1099 entry->prev_prio = prev->prio;
1100 entry->prev_state = prev->state;
1101 entry->next_pid = next->pid;
1102 entry->next_prio = next->prio;
1103 entry->next_state = next->state;
1104 entry->next_cpu = task_cpu(next);
1118 struct ctx_switch_entry *entry; local
1121 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1125 entry = ring_buffer_event_data(event);
1126 tracing_generic_entry_update(&entry->ent, flags, pc);
1127 entry->ent.type = TRACE_WAKE;
1128 entry->prev_pid = curr->pid;
1129 entry->prev_prio = curr->prio;
1130 entry->prev_state = curr->state;
1131 entry->next_pid = wakee->pid;
1132 entry->next_prio = wakee->prio;
1133 entry->next_state = wakee->state;
1134 entry->next_cpu = task_cpu(wakee);
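
The two ctx_switch_entry fragments (lines 1088-1104 and 1118-1134) fill the same record layout from two call sites: TRACE_CTX copies the prev/next tasks across a context switch, while TRACE_WAKE records the current task and the wakee. A sketch of a shared fill helper, with a simplified task structure standing in for task_struct:

    #include <stdio.h>

    /* Simplified stand-in for the few task_struct fields the record needs. */
    struct sketch_task {
            int  pid;
            int  prio;
            long state;
            int  cpu;
    };

    /* Same shape as the ctx_switch_entry record: one layout, two uses. */
    struct ctx_record {
            unsigned char type;            /* CTX or WAKE in the kernel      */
            int  prev_pid,  next_pid;
            int  prev_prio, next_prio;
            long prev_state, next_state;
            int  next_cpu;
    };

    static void fill_ctx_record(struct ctx_record *entry, unsigned char type,
                                const struct sketch_task *from,
                                const struct sketch_task *to)
    {
            entry->type       = type;
            entry->prev_pid   = from->pid;
            entry->prev_prio  = from->prio;
            entry->prev_state = from->state;
            entry->next_pid   = to->pid;
            entry->next_prio  = to->prio;
            entry->next_state = to->state;
            entry->next_cpu   = to->cpu;   /* task_cpu(next) in the kernel   */
    }

    int main(void)
    {
            struct sketch_task prev = { 100, 120, 1, 0 }, next = { 200, 110, 0, 2 };
            struct ctx_record  rec;

            fill_ctx_record(&rec, 2 /* illustrative "CTX" id */, &prev, &next);
            printf("%d ==> %d on cpu %d\n", rec.prev_pid, rec.next_pid, rec.next_cpu);
            return 0;
    }

Reusing one layout for both event types is why the printers later only need to test entry->type == TRACE_WAKE (e.g. line 2154) to pick the '+' versus '==>' arrow.
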
1580 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, argument
1594 task = find_task_by_vpid(entry->ent.tgid);
1601 unsigned long ip = entry->caller[i];
1708 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) argument
1713 comm = trace_find_cmdline(entry->pid);
1715 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1718 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
1719 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
1720 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
1722 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
1723 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
1737 if (entry->preempt_count)
1738 trace_seq_printf(s, "%x", entry->preempt_count);
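
lat_print_generic() (lines 1708-1738) turns the packed header back into the latency-format columns: 'd' or 'X' for the irqs-off state, 'N' for need-resched, a hardirq/softirq column fed by the two locals at lines 1722-1723, and a hex digit for a non-zero preempt count. An approximate userspace decode, again with illustrative bit values rather than the kernel's:

    #include <stdio.h>

    /* Illustrative flag bits (the kernel has its own TRACE_FLAG_* values). */
    enum {
            SKETCH_FLAG_IRQS_OFF       = 0x01,
            SKETCH_FLAG_IRQS_NOSUPPORT = 0x02,
            SKETCH_FLAG_NEED_RESCHED   = 0x04,
            SKETCH_FLAG_HARDIRQ        = 0x08,
            SKETCH_FLAG_SOFTIRQ        = 0x10,
    };

    /* Decode the packed header into the latency-format column characters. */
    static void print_lat_columns(unsigned char flags, unsigned char preempt_count)
    {
            int hardirq = flags & SKETCH_FLAG_HARDIRQ;
            int softirq = flags & SKETCH_FLAG_SOFTIRQ;

            printf("%c%c%c",
                   (flags & SKETCH_FLAG_IRQS_OFF)       ? 'd' :
                   (flags & SKETCH_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
                   (flags & SKETCH_FLAG_NEED_RESCHED)   ? 'N' : '.',
                   hardirq && softirq ? 'H' :
                   hardirq ? 'h' : softirq ? 's' : '.');

            if (preempt_count)
                    printf("%x", (unsigned int)preempt_count);
            else
                    printf(".");           /* placeholder when preemptible   */
            printf("\n");
    }

    int main(void)
    {
            print_lat_columns(SKETCH_FLAG_IRQS_OFF | SKETCH_FLAG_SOFTIRQ, 1);
            return 0;
    }
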
1828 struct trace_entry *entry = iter->ent; local
1836 if (entry->type == TRACE_CONT)
1848 comm = trace_find_cmdline(entry->pid);
1852 entry->pid, cpu, entry->flags,
1853 entry->preempt_count, trace_idx,
1859 lat_print_generic(s, entry, cpu);
1862 switch (entry->type) {
1866 trace_assign_type(field, entry);
1878 trace_assign_type(field, entry);
1886 S, entry->type == TRACE_CTX ? "==>" : " +",
1896 trace_assign_type(field, entry);
1907 trace_assign_type(field, entry);
1920 trace_assign_type(field, entry);
1924 if (entry->flags & TRACE_FLAG_CONT)
1931 trace_assign_type(field, entry);
1943 trace_assign_type(field, entry);
1950 trace_seq_printf(s, "Unknown type %d\n", entry->type);
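
The printer above (lines 1828-1950) and the trace, raw, hex and binary printers that follow all share one shape: switch on entry->type, convert the generic header into the concrete record with trace_assign_type, format that record's fields, and fall back to an "Unknown type" message. A compact sketch of that dispatch, with a plain cast where the kernel uses the trace_assign_type macro:

    #include <stdio.h>

    /* Common header followed by per-type payloads, as in the kernel records. */
    struct hdr     { unsigned char type; int pid; };
    struct fn_rec  { struct hdr ent; unsigned long ip, parent_ip; };
    struct ctx_rec { struct hdr ent; int prev_pid, next_pid; };

    enum { SK_FN = 1, SK_CTX = 2 };        /* illustrative type ids          */

    /* trace_assign_type() is a checked cast of 'entry' to the concrete
     * record; a plain cast stands in for it here. */
    static void print_one(const struct hdr *entry)
    {
            switch (entry->type) {
            case SK_FN: {
                    const struct fn_rec *field = (const struct fn_rec *)entry;
                    printf("fn  %#lx <- %#lx\n", field->ip, field->parent_ip);
                    break;
            }
            case SK_CTX: {
                    const struct ctx_rec *field = (const struct ctx_rec *)entry;
                    printf("ctx %d ==> %d\n", field->prev_pid, field->next_pid);
                    break;
            }
            default:
                    printf("Unknown type %d\n", entry->type);
            }
    }

    int main(void)
    {
            struct fn_rec  f = { { SK_FN,  1 }, 0x1000, 0x2000 };
            struct ctx_rec c = { { SK_CTX, 1 }, 100, 200 };

            print_one(&f.ent);
            print_one(&c.ent);
            return 0;
    }
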
1959 struct trace_entry *entry; local
1968 entry = iter->ent;
1970 if (entry->type == TRACE_CONT)
1981 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1991 switch (entry->type) {
1995 trace_assign_type(field, entry);
2020 trace_assign_type(field, entry);
2028 entry->type == TRACE_CTX ? "==>" : " +",
2040 trace_assign_type(field, entry);
2053 trace_assign_type(field, entry);
2074 trace_assign_type(field, entry);
2078 if (entry->flags & TRACE_FLAG_CONT)
2091 trace_assign_type(field, entry);
2103 trace_assign_type(field, entry);
2120 struct trace_entry *entry; local
2124 entry = iter->ent;
2126 if (entry->type == TRACE_CONT)
2130 entry->pid, iter->cpu, iter->ts);
2134 switch (entry->type) {
2138 trace_assign_type(field, entry);
2151 trace_assign_type(field, entry);
2154 S = entry->type == TRACE_WAKE ? '+' :
2173 trace_assign_type(field, entry);
2186 trace_assign_type(field, entry);
2189 if (entry->flags & TRACE_FLAG_CONT)
2214 struct trace_entry *entry; local
2217 entry = iter->ent;
2219 if (entry->type == TRACE_CONT)
2222 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2226 switch (entry->type) {
2230 trace_assign_type(field, entry);
2240 trace_assign_type(field, entry);
2243 S = entry->type == TRACE_WAKE ? '+' :
2259 trace_assign_type(field, entry);
2275 struct trace_entry *entry = iter->ent; local
2279 trace_assign_type(field, entry);
2285 if (entry->flags & TRACE_FLAG_CONT)
2294 struct trace_entry *entry; local
2296 entry = iter->ent;
2298 if (entry->type == TRACE_CONT)
2301 SEQ_PUT_FIELD_RET(s, entry->pid);
2302 SEQ_PUT_FIELD_RET(s, entry->cpu);
2305 switch (entry->type) {
2309 trace_assign_type(field, entry);
2318 trace_assign_type(field, entry);
2333 trace_assign_type(field, entry);
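
The hex and binary printers (lines 2214-2333) emit fields through SEQ_PUT_HEX_FIELD_RET and SEQ_PUT_FIELD_RET; judging by the _RET suffix, these append one field to the seq buffer and make the calling printer return early when it is full. A hypothetical userspace version of that append-or-bail macro pattern:

    #include <stdio.h>
    #include <string.h>

    struct sketch_seq {
            char   buf[64];
            size_t len;
    };

    /* Append raw bytes; returns 0 when the buffer is full. */
    static int seq_put_mem(struct sketch_seq *s, const void *p, size_t n)
    {
            if (s->len + n > sizeof(s->buf))
                    return 0;
            memcpy(s->buf + s->len, p, n);
            s->len += n;
            return 1;
    }

    /* Hypothetical analogue of SEQ_PUT_FIELD_RET: emit the field or make
     * the *calling* function return 0, so one overflow aborts the record. */
    #define SKETCH_PUT_FIELD_RET(s, field)                              \
            do {                                                        \
                    if (!seq_put_mem((s), &(field), sizeof(field)))     \
                            return 0;                                   \
            } while (0)

    struct bin_record { int pid; int cpu; unsigned long long ts; };

    static int emit_bin_record(struct sketch_seq *s, const struct bin_record *e)
    {
            SKETCH_PUT_FIELD_RET(s, e->pid);
            SKETCH_PUT_FIELD_RET(s, e->cpu);
            SKETCH_PUT_FIELD_RET(s, e->ts);
            return 1;
    }

    int main(void)
    {
            struct sketch_seq s = { .len = 0 };
            struct bin_record r = { 1234, 0, 123456789ULL };
            int ok = emit_bin_record(&s, &r);

            printf("emitted=%d bytes=%zu\n", ok, s.len);
            return 0;
    }
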
3537 struct dentry *entry; local
3541 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
3543 if (!entry)
3546 entry = debugfs_create_file("trace_options", 0644, d_tracer,
3548 if (!entry)
3551 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
3553 if (!entry)
3556 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
3558 if (!entry)
3561 entry = debugfs_create_file("trace", 0444, d_tracer,
3563 if (!entry)
3566 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
3568 if (!entry)
3571 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
3573 if (!entry)
3576 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
3579 if (!entry)
3583 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
3585 if (!entry)
3588 entry = debugfs_create_file("README", 0644, d_tracer,
3590 if (!entry)
3593 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
3595 if (!entry)
3599 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
3601 if (!entry)
3605 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
3607 if (!entry)
3612 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3615 if (!entry)
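
The debugfs setup (lines 3537-3615) repeats one step per control file: call debugfs_create_file() and check the returned dentry for NULL before moving on. The sketch below is not kernel code, just a table-driven restatement of that repetition with a stub in place of debugfs, to show how the per-file boilerplate lines up:

    #include <stddef.h>
    #include <stdio.h>

    /* Stub standing in for the file-creation call; returns NULL on failure. */
    static void *sketch_create_file(const char *name, unsigned int mode)
    {
            static int dummy;
            printf("creating '%s' (mode %o)\n", name, mode);
            return &dummy;                  /* pretend creation succeeded     */
    }

    /* Table-driven restatement of the repeated create-and-check pattern. */
    static const struct {
            const char  *name;
            unsigned int mode;
    } control_files[] = {
            { "tracing_enabled", 0644 },
            { "trace_options",   0644 },
            { "latency_trace",   0444 },
            { "trace",           0444 },
            { "trace_pipe",      0644 },
            { "buffer_size_kb",  0644 },
            { "trace_marker",    0220 },
    };

    int main(void)
    {
            for (size_t i = 0; i < sizeof(control_files) / sizeof(control_files[0]); i++) {
                    void *entry = sketch_create_file(control_files[i].name,
                                                     control_files[i].mode);
                    if (!entry)             /* failure is reported, not fatal */
                            fprintf(stderr, "could not create '%s'\n",
                                    control_files[i].name);
            }
            return 0;
    }
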
3634 struct print_entry *entry; local
3655 size = sizeof(*entry) + len + 1;
3659 entry = ring_buffer_event_data(event);
3660 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3661 entry->ent.type = TRACE_PRINT;
3662 entry->ip = ip;
3663 entry->depth = depth;
3665 memcpy(&entry->buf, trace_buf, len);
3666 entry->buf[len] = 0;
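
The print_entry fragment (lines 3634-3666) sizes its reservation to the formatted text, sizeof(*entry) + len + 1, then copies the string into the record and NUL-terminates it in place. A userspace sketch of that variable-length record, using malloc where the kernel reserves ring-buffer space:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Header followed by a flexible text buffer, like the print record. */
    struct print_record {
            unsigned long ip;               /* call site                      */
            int           depth;            /* nesting depth                  */
            char          buf[];            /* formatted text follows in place*/
    };

    /* Reserve exactly header + text + terminating NUL, then copy the text in. */
    static struct print_record *make_print_record(unsigned long ip, int depth,
                                                  const char *text)
    {
            size_t len  = strlen(text);
            size_t size = sizeof(struct print_record) + len + 1;
            struct print_record *entry = malloc(size);

            if (!entry)
                    return NULL;
            entry->ip    = ip;
            entry->depth = depth;
            memcpy(entry->buf, text, len);
            entry->buf[len] = 0;            /* NUL-terminate inside the record */
            return entry;
    }

    int main(void)
    {
            struct print_record *r = make_print_record(0x1000, 0, "hello, tracer");

            if (r) {
                    printf("%d: %s\n", r->depth, r->buf);
                    free(r);
            }
            return 0;
    }

The trailing NUL lets the printers treat buf as an ordinary C string without carrying the length separately.
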