Lines Matching refs:tr (references to the struct trace_array *tr pointer; each hit gives the source line number, the matching code, the enclosing function, and whether tr is an argument or a local variable there)
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) in dummy_set_flag() argument
176 int tracing_set_tracer(struct trace_array *tr, const char *buf);
177 static void ftrace_trace_userstack(struct trace_array *tr,
437 struct trace_array *tr; in trace_array_get() local
441 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get()
442 if (tr == this_tr) { in trace_array_get()
443 tr->ref++; in trace_array_get()
478 int tracing_check_open_get_tr(struct trace_array *tr) in tracing_check_open_get_tr() argument
489 if (tr && trace_array_get(tr) < 0) in tracing_check_open_get_tr()
914 static inline void ftrace_trace_stack(struct trace_array *tr,
925 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
958 void tracer_tracing_on(struct trace_array *tr) in tracer_tracing_on() argument
960 if (tr->array_buffer.buffer) in tracer_tracing_on()
961 ring_buffer_record_on(tr->array_buffer.buffer); in tracer_tracing_on()
970 tr->buffer_disabled = 0; in tracer_tracing_on()
1105 static void tracing_snapshot_instance_cond(struct trace_array *tr, in tracing_snapshot_instance_cond() argument
1108 struct tracer *tracer = tr->current_trace; in tracing_snapshot_instance_cond()
1117 if (!tr->allocated_snapshot) { in tracing_snapshot_instance_cond()
1132 update_max_tr(tr, current, smp_processor_id(), cond_data); in tracing_snapshot_instance_cond()
1136 void tracing_snapshot_instance(struct trace_array *tr) in tracing_snapshot_instance() argument
1138 tracing_snapshot_instance_cond(tr, NULL); in tracing_snapshot_instance()
1157 struct trace_array *tr = &global_trace; in tracing_snapshot() local
1159 tracing_snapshot_instance(tr); in tracing_snapshot()
1176 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) in tracing_snapshot_cond() argument
1178 tracing_snapshot_instance_cond(tr, cond_data); in tracing_snapshot_cond()
1196 void *tracing_cond_snapshot_data(struct trace_array *tr) in tracing_cond_snapshot_data() argument
1201 arch_spin_lock(&tr->max_lock); in tracing_cond_snapshot_data()
1203 if (tr->cond_snapshot) in tracing_cond_snapshot_data()
1204 cond_data = tr->cond_snapshot->cond_data; in tracing_cond_snapshot_data()
1206 arch_spin_unlock(&tr->max_lock); in tracing_cond_snapshot_data()
1217 int tracing_alloc_snapshot_instance(struct trace_array *tr) in tracing_alloc_snapshot_instance() argument
1221 if (!tr->allocated_snapshot) { in tracing_alloc_snapshot_instance()
1224 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_alloc_snapshot_instance()
1225 &tr->array_buffer, RING_BUFFER_ALL_CPUS); in tracing_alloc_snapshot_instance()
1229 tr->allocated_snapshot = true; in tracing_alloc_snapshot_instance()
1235 static void free_snapshot(struct trace_array *tr) in free_snapshot() argument
1242 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); in free_snapshot()
1243 set_buffer_entries(&tr->max_buffer, 1); in free_snapshot()
1244 tracing_reset_online_cpus(&tr->max_buffer); in free_snapshot()
1245 tr->allocated_snapshot = false; in free_snapshot()
1260 struct trace_array *tr = &global_trace; in tracing_alloc_snapshot() local
1263 ret = tracing_alloc_snapshot_instance(tr); in tracing_alloc_snapshot()
1306 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, in tracing_snapshot_cond_enable() argument
1321 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_cond_enable()
1325 if (tr->current_trace->use_max_tr) { in tracing_snapshot_cond_enable()
1338 if (tr->cond_snapshot) { in tracing_snapshot_cond_enable()
1344 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_enable()
1345 tr->cond_snapshot = cond_snapshot; in tracing_snapshot_cond_enable()
1346 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_enable()
1370 int tracing_snapshot_cond_disable(struct trace_array *tr) in tracing_snapshot_cond_disable() argument
1375 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_disable()
1377 if (!tr->cond_snapshot) in tracing_snapshot_cond_disable()
1380 kfree(tr->cond_snapshot); in tracing_snapshot_cond_disable()
1381 tr->cond_snapshot = NULL; in tracing_snapshot_cond_disable()
1384 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_disable()
1396 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) in tracing_snapshot_cond() argument
1413 void *tracing_cond_snapshot_data(struct trace_array *tr) in tracing_cond_snapshot_data() argument
1418 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) in tracing_snapshot_cond_enable() argument
1423 int tracing_snapshot_cond_disable(struct trace_array *tr) in tracing_snapshot_cond_disable() argument
1430 void tracer_tracing_off(struct trace_array *tr) in tracer_tracing_off() argument
1432 if (tr->array_buffer.buffer) in tracer_tracing_off()
1433 ring_buffer_record_off(tr->array_buffer.buffer); in tracer_tracing_off()
1442 tr->buffer_disabled = 1; in tracer_tracing_off()
1476 bool tracer_tracing_is_on(struct trace_array *tr) in tracer_tracing_is_on() argument
1478 if (tr->array_buffer.buffer) in tracer_tracing_is_on()
1479 return ring_buffer_record_is_on(tr->array_buffer.buffer); in tracer_tracing_is_on()
1480 return !tr->buffer_disabled; in tracer_tracing_is_on()
1560 bool trace_clock_in_ns(struct trace_array *tr) in trace_clock_in_ns() argument
1562 if (trace_clocks[tr->clock_id].in_ns) in trace_clock_in_ns()
1707 struct trace_array *tr = container_of(work, struct trace_array, in latency_fsnotify_workfn() local
1709 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); in latency_fsnotify_workfn()
1714 struct trace_array *tr = container_of(iwork, struct trace_array, in latency_fsnotify_workfn_irq() local
1716 queue_work(fsnotify_wq, &tr->fsnotify_work); in latency_fsnotify_workfn_irq()
1719 static void trace_create_maxlat_file(struct trace_array *tr, in trace_create_maxlat_file() argument
1722 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); in trace_create_maxlat_file()
1723 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); in trace_create_maxlat_file()
1724 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644, in trace_create_maxlat_file()
1725 d_tracer, &tr->max_latency, in trace_create_maxlat_file()
1742 void latency_fsnotify(struct trace_array *tr) in latency_fsnotify() argument
1751 irq_work_queue(&tr->fsnotify_irqwork); in latency_fsnotify()
1760 #define trace_create_maxlat_file(tr, d_tracer) \ argument
1762 &tr->max_latency, &tracing_max_lat_fops)
1773 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) in __update_max_tr() argument
1775 struct array_buffer *trace_buf = &tr->array_buffer; in __update_max_tr()
1776 struct array_buffer *max_buf = &tr->max_buffer; in __update_max_tr()
1783 max_data->saved_latency = tr->max_latency; in __update_max_tr()
1804 latency_fsnotify(tr); in __update_max_tr()
1818 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, in update_max_tr() argument
1821 if (tr->stop_count) in update_max_tr()
1826 if (!tr->allocated_snapshot) { in update_max_tr()
1828 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr()
1832 arch_spin_lock(&tr->max_lock); in update_max_tr()
1835 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) in update_max_tr()
1836 ring_buffer_record_on(tr->max_buffer.buffer); in update_max_tr()
1838 ring_buffer_record_off(tr->max_buffer.buffer); in update_max_tr()
1841 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) in update_max_tr()
1844 swap(tr->array_buffer.buffer, tr->max_buffer.buffer); in update_max_tr()
1846 __update_max_tr(tr, tsk, cpu); in update_max_tr()
1849 arch_spin_unlock(&tr->max_lock); in update_max_tr()
1861 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) in update_max_tr_single() argument
1865 if (tr->stop_count) in update_max_tr_single()
1869 if (!tr->allocated_snapshot) { in update_max_tr_single()
1871 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr_single()
1875 arch_spin_lock(&tr->max_lock); in update_max_tr_single()
1877 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); in update_max_tr_single()
1886 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, in update_max_tr_single()
1892 __update_max_tr(tr, tsk, cpu); in update_max_tr_single()
1893 arch_spin_unlock(&tr->max_lock); in update_max_tr_single()
1932 struct trace_array *tr = &global_trace; in run_tracer_selftest() local
1933 struct tracer *saved_tracer = tr->current_trace; in run_tracer_selftest()
1954 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
1956 tr->current_trace = type; in run_tracer_selftest()
1962 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, in run_tracer_selftest()
1964 tr->allocated_snapshot = true; in run_tracer_selftest()
1970 ret = type->selftest(type, tr); in run_tracer_selftest()
1972 tr->current_trace = saved_tracer; in run_tracer_selftest()
1980 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
1984 tr->allocated_snapshot = false; in run_tracer_selftest()
1988 ring_buffer_resize(tr->max_buffer.buffer, 1, in run_tracer_selftest()
2050 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2183 struct trace_array *tr; in tracing_reset_all_online_cpus() local
2185 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in tracing_reset_all_online_cpus()
2186 if (!tr->clear_trace) in tracing_reset_all_online_cpus()
2188 tr->clear_trace = false; in tracing_reset_all_online_cpus()
2189 tracing_reset_online_cpus(&tr->array_buffer); in tracing_reset_all_online_cpus()
2191 tracing_reset_online_cpus(&tr->max_buffer); in tracing_reset_all_online_cpus()
2323 static void tracing_start_tr(struct trace_array *tr) in tracing_start_tr() argument
2332 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_start_tr()
2335 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_start_tr()
2337 if (--tr->stop_count) { in tracing_start_tr()
2338 if (tr->stop_count < 0) { in tracing_start_tr()
2341 tr->stop_count = 0; in tracing_start_tr()
2346 buffer = tr->array_buffer.buffer; in tracing_start_tr()
2351 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_start_tr()
2388 static void tracing_stop_tr(struct trace_array *tr) in tracing_stop_tr() argument
2394 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_stop_tr()
2397 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_stop_tr()
2398 if (tr->stop_count++) in tracing_stop_tr()
2401 buffer = tr->array_buffer.buffer; in tracing_stop_tr()
2406 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_stop_tr()
2770 *current_rb = trace_file->tr->array_buffer.buffer; in trace_event_buffer_lock_reserve()
2895 void trace_buffer_unlock_commit_regs(struct trace_array *tr, in trace_buffer_unlock_commit_regs() argument
2909 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); in trace_buffer_unlock_commit_regs()
2910 ftrace_trace_userstack(tr, buffer, flags, pc); in trace_buffer_unlock_commit_regs()
2924 trace_function(struct trace_array *tr, in trace_function() argument
2929 struct trace_buffer *buffer = tr->array_buffer.buffer; in trace_function()
3036 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
3041 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) in ftrace_trace_stack()
3047 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, in __trace_stack() argument
3050 struct trace_buffer *buffer = tr->array_buffer.buffer; in __trace_stack()
3097 ftrace_trace_userstack(struct trace_array *tr, in ftrace_trace_userstack() argument
3104 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) in ftrace_trace_userstack()
3143 static void ftrace_trace_userstack(struct trace_array *tr, in ftrace_trace_userstack() argument
3273 struct trace_array *tr = &global_trace; in trace_vbprintk() local
3301 buffer = tr->array_buffer.buffer; in trace_vbprintk()
3314 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); in trace_vbprintk()
3388 int trace_array_vprintk(struct trace_array *tr, in trace_array_vprintk() argument
3391 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); in trace_array_vprintk()
3415 int trace_array_printk(struct trace_array *tr, in trace_array_printk() argument
3421 if (!tr) in trace_array_printk()
3425 if (tr == &global_trace) in trace_array_printk()
3428 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in trace_array_printk()
3432 ret = trace_array_vprintk(tr, ip, fmt, ap); in trace_array_printk()
3446 int trace_array_init_printk(struct trace_array *tr) in trace_array_init_printk() argument
3448 if (!tr) in trace_array_init_printk()
3452 if (tr == &global_trace) in trace_array_init_printk()
3706 struct trace_array *tr = iter->tr; in s_start() local
3719 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) in s_start()
3720 *iter->trace = *tr->current_trace; in s_start()
3813 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) in trace_total_entries_cpu() argument
3817 if (!tr) in trace_total_entries_cpu()
3818 tr = &global_trace; in trace_total_entries_cpu()
3820 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); in trace_total_entries_cpu()
3825 unsigned long trace_total_entries(struct trace_array *tr) in trace_total_entries() argument
3829 if (!tr) in trace_total_entries()
3830 tr = &global_trace; in trace_total_entries()
3832 get_total_entries(&tr->array_buffer, &total, &entries); in trace_total_entries()
3956 struct trace_array *tr = iter->tr; in test_cpu_buff_start() local
3958 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) in test_cpu_buff_start()
3982 struct trace_array *tr = iter->tr; in print_trace_fmt() local
3984 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); in print_trace_fmt()
3994 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_trace_fmt()
4014 struct trace_array *tr = iter->tr; in print_raw_fmt() local
4021 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) in print_raw_fmt()
4039 struct trace_array *tr = iter->tr; in print_hex_fmt() local
4047 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_hex_fmt()
4069 struct trace_array *tr = iter->tr; in print_bin_fmt() local
4076 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_bin_fmt()
4125 struct trace_array *tr = iter->tr; in print_trace_line() local
4126 unsigned long trace_flags = tr->trace_flags; in print_trace_line()
4176 struct trace_array *tr = iter->tr; in trace_latency_header() local
4185 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) in trace_latency_header()
4192 struct trace_array *tr = iter->tr; in trace_default_header() local
4193 unsigned long trace_flags = tr->trace_flags; in trace_default_header()
4253 if (iter->tr->allocated_snapshot) in print_snapshot_help()
4275 if (iter->tr) { in s_show()
4334 struct trace_array *tr = inode->i_private; in __tracing_open() local
4371 *iter->trace = *tr->current_trace; in __tracing_open()
4376 iter->tr = tr; in __tracing_open()
4380 if (tr->current_trace->print_max || snapshot) in __tracing_open()
4381 iter->array_buffer = &tr->max_buffer; in __tracing_open()
4384 iter->array_buffer = &tr->array_buffer; in __tracing_open()
4399 if (trace_clocks[tr->clock_id].in_ns) in __tracing_open()
4406 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) in __tracing_open()
4407 tracing_stop_tr(tr); in __tracing_open()
4467 struct trace_array *tr = inode->i_private; in tracing_open_generic_tr() local
4470 ret = tracing_check_open_get_tr(tr); in tracing_open_generic_tr()
4481 struct trace_array *tr = inode->i_private; in tracing_release() local
4487 trace_array_put(tr); in tracing_release()
4503 if (!iter->snapshot && tr->stop_count) in tracing_release()
4505 tracing_start_tr(tr); in tracing_release()
4507 __trace_array_put(tr); in tracing_release()
4523 struct trace_array *tr = inode->i_private; in tracing_release_generic_tr() local
4525 trace_array_put(tr); in tracing_release_generic_tr()
4531 struct trace_array *tr = inode->i_private; in tracing_single_release_tr() local
4533 trace_array_put(tr); in tracing_single_release_tr()
4540 struct trace_array *tr = inode->i_private; in tracing_open() local
4544 ret = tracing_check_open_get_tr(tr); in tracing_open()
4551 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_open()
4554 if (tr->current_trace->print_max) in tracing_open()
4555 trace_buf = &tr->max_buffer; in tracing_open()
4568 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open()
4573 trace_array_put(tr); in tracing_open()
4584 trace_ok_for_array(struct tracer *t, struct trace_array *tr) in trace_ok_for_array() argument
4586 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; in trace_ok_for_array()
4591 get_tracer_for_array(struct trace_array *tr, struct tracer *t) in get_tracer_for_array() argument
4593 while (t && !trace_ok_for_array(t, tr)) in get_tracer_for_array()
4602 struct trace_array *tr = m->private; in t_next() local
4608 t = get_tracer_for_array(tr, t->next); in t_next()
4615 struct trace_array *tr = m->private; in t_start() local
4621 t = get_tracer_for_array(tr, trace_types); in t_start()
4658 struct trace_array *tr = inode->i_private; in show_traces_open() local
4662 ret = tracing_check_open_get_tr(tr); in show_traces_open()
4668 trace_array_put(tr); in show_traces_open()
4673 m->private = tr; in show_traces_open()
4680 struct trace_array *tr = inode->i_private; in show_traces_release() local
4682 trace_array_put(tr); in show_traces_release()
4726 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_read() local
4731 cpumask_pr_args(tr->tracing_cpumask)) + 1; in tracing_cpumask_read()
4737 cpumask_pr_args(tr->tracing_cpumask)); in tracing_cpumask_read()
4750 int tracing_set_cpumask(struct trace_array *tr, in tracing_set_cpumask() argument
4755 if (!tr) in tracing_set_cpumask()
4759 arch_spin_lock(&tr->max_lock); in tracing_set_cpumask()
4765 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
4767 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
4768 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
4770 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
4772 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
4773 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
4776 arch_spin_unlock(&tr->max_lock); in tracing_set_cpumask()
4779 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); in tracing_set_cpumask()
4788 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_write() local
4799 err = tracing_set_cpumask(tr, tracing_cpumask_new); in tracing_cpumask_write()
4824 struct trace_array *tr = m->private; in tracing_trace_options_show() local
4829 tracer_flags = tr->current_trace->flags->val; in tracing_trace_options_show()
4830 trace_opts = tr->current_trace->flags->opts; in tracing_trace_options_show()
4833 if (tr->trace_flags & (1 << i)) in tracing_trace_options_show()
4850 static int __set_tracer_option(struct trace_array *tr, in __set_tracer_option() argument
4857 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); in __set_tracer_option()
4869 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) in set_tracer_option() argument
4871 struct tracer *trace = tr->current_trace; in set_tracer_option()
4880 return __set_tracer_option(tr, trace->flags, opts, neg); in set_tracer_option()
4895 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) in set_tracer_flag() argument
4904 if (!!(tr->trace_flags & mask) == !!enabled) in set_tracer_flag()
4908 if (tr->current_trace->flag_changed) in set_tracer_flag()
4909 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) in set_tracer_flag()
4913 tr->trace_flags |= mask; in set_tracer_flag()
4915 tr->trace_flags &= ~mask; in set_tracer_flag()
4935 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; in set_tracer_flag()
4943 trace_event_follow_fork(tr, enabled); in set_tracer_flag()
4946 ftrace_pid_follow_fork(tr, enabled); in set_tracer_flag()
4949 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); in set_tracer_flag()
4951 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); in set_tracer_flag()
4963 int trace_set_options(struct trace_array *tr, char *option) in trace_set_options() argument
4985 ret = set_tracer_option(tr, cmp, neg); in trace_set_options()
4987 ret = set_tracer_flag(tr, 1 << ret, !neg); in trace_set_options()
5027 struct trace_array *tr = m->private; in tracing_trace_options_write() local
5039 ret = trace_set_options(tr, buf); in tracing_trace_options_write()
5050 struct trace_array *tr = inode->i_private; in tracing_trace_options_open() local
5053 ret = tracing_check_open_get_tr(tr); in tracing_trace_options_open()
5059 trace_array_put(tr); in tracing_trace_options_open()
5741 struct trace_array *tr = filp->private_data; in tracing_set_trace_read() local
5746 r = sprintf(buf, "%s\n", tr->current_trace->name); in tracing_set_trace_read()
5752 int tracer_init(struct tracer *t, struct trace_array *tr) in tracer_init() argument
5754 tracing_reset_online_cpus(&tr->array_buffer); in tracer_init()
5755 return t->init(tr); in tracer_init()
5794 static int __tracing_resize_ring_buffer(struct trace_array *tr, in __tracing_resize_ring_buffer() argument
5807 if (!tr->array_buffer.buffer) in __tracing_resize_ring_buffer()
5810 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5815 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || in __tracing_resize_ring_buffer()
5816 !tr->current_trace->use_max_tr) in __tracing_resize_ring_buffer()
5819 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5821 int r = resize_buffer_duplicate_size(&tr->array_buffer, in __tracing_resize_ring_buffer()
5822 &tr->array_buffer, cpu); in __tracing_resize_ring_buffer()
5845 set_buffer_entries(&tr->max_buffer, size); in __tracing_resize_ring_buffer()
5847 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
5853 set_buffer_entries(&tr->array_buffer, size); in __tracing_resize_ring_buffer()
5855 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
5860 ssize_t tracing_resize_ring_buffer(struct trace_array *tr, in tracing_resize_ring_buffer() argument
5875 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); in tracing_resize_ring_buffer()
5912 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5918 static void tracing_set_nop(struct trace_array *tr) in tracing_set_nop() argument
5920 if (tr->current_trace == &nop_trace) in tracing_set_nop()
5923 tr->current_trace->enabled--; in tracing_set_nop()
5925 if (tr->current_trace->reset) in tracing_set_nop()
5926 tr->current_trace->reset(tr); in tracing_set_nop()
5928 tr->current_trace = &nop_trace; in tracing_set_nop()
5933 static void add_tracer_options(struct trace_array *tr, struct tracer *t) in add_tracer_options() argument
5936 if (!tr->dir) in add_tracer_options()
5943 create_trace_option_files(tr, t); in add_tracer_options()
5946 int tracing_set_tracer(struct trace_array *tr, const char *buf) in tracing_set_tracer() argument
5957 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, in tracing_set_tracer()
5972 if (t == tr->current_trace) in tracing_set_tracer()
5978 arch_spin_lock(&tr->max_lock); in tracing_set_tracer()
5979 if (tr->cond_snapshot) in tracing_set_tracer()
5981 arch_spin_unlock(&tr->max_lock); in tracing_set_tracer()
5995 if (!trace_ok_for_array(t, tr)) { in tracing_set_tracer()
6001 if (tr->trace_ref) { in tracing_set_tracer()
6008 tr->current_trace->enabled--; in tracing_set_tracer()
6010 if (tr->current_trace->reset) in tracing_set_tracer()
6011 tr->current_trace->reset(tr); in tracing_set_tracer()
6014 had_max_tr = tr->current_trace->use_max_tr; in tracing_set_tracer()
6017 tr->current_trace = &nop_trace; in tracing_set_tracer()
6028 free_snapshot(tr); in tracing_set_tracer()
6031 if (t->use_max_tr && !tr->allocated_snapshot) { in tracing_set_tracer()
6032 ret = tracing_alloc_snapshot_instance(tr); in tracing_set_tracer()
6037 tr->current_trace = &nop_trace; in tracing_set_tracer()
6041 ret = tracer_init(t, tr); in tracing_set_tracer()
6046 tr->current_trace = t; in tracing_set_tracer()
6047 tr->current_trace->enabled++; in tracing_set_tracer()
6048 trace_branch_enable(tr); in tracing_set_tracer()
6059 struct trace_array *tr = filp->private_data; in tracing_set_trace_write() local
6079 err = tracing_set_tracer(tr, buf); in tracing_set_trace_write()
6129 struct trace_array *tr = filp->private_data; in tracing_thresh_write() local
6137 if (tr->current_trace->update_thresh) { in tracing_thresh_write()
6138 ret = tr->current_trace->update_thresh(tr); in tracing_thresh_write()
6170 struct trace_array *tr = inode->i_private; in tracing_open_pipe() local
6174 ret = tracing_check_open_get_tr(tr); in tracing_open_pipe()
6184 __trace_array_put(tr); in tracing_open_pipe()
6189 iter->trace = tr->current_trace; in tracing_open_pipe()
6199 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open_pipe()
6203 if (trace_clocks[tr->clock_id].in_ns) in tracing_open_pipe()
6206 iter->tr = tr; in tracing_open_pipe()
6207 iter->array_buffer = &tr->array_buffer; in tracing_open_pipe()
6217 tr->trace_ref++; in tracing_open_pipe()
6224 __trace_array_put(tr); in tracing_open_pipe()
6232 struct trace_array *tr = inode->i_private; in tracing_release_pipe() local
6236 tr->trace_ref--; in tracing_release_pipe()
6247 trace_array_put(tr); in tracing_release_pipe()
6255 struct trace_array *tr = iter->tr; in trace_poll() local
6261 if (tr->trace_flags & TRACE_ITER_BLOCK) in trace_poll()
6268 filp, poll_table, iter->tr->buffer_percent); in trace_poll()
6300 if (!tracer_tracing_is_on(iter->tr) && iter->pos) in tracing_wait_pipe()
6572 struct trace_array *tr = inode->i_private; in tracing_entries_read() local
6590 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; in tracing_entries_read()
6591 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { in tracing_entries_read()
6607 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); in tracing_entries_read()
6620 struct trace_array *tr = inode->i_private; in tracing_entries_write() local
6634 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); in tracing_entries_write()
6647 struct trace_array *tr = filp->private_data; in tracing_total_entries_read() local
6654 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; in tracing_total_entries_read()
6684 struct trace_array *tr = inode->i_private; in tracing_free_buffer_release() local
6687 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) in tracing_free_buffer_release()
6688 tracer_tracing_off(tr); in tracing_free_buffer_release()
6690 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); in tracing_free_buffer_release()
6692 trace_array_put(tr); in tracing_free_buffer_release()
6701 struct trace_array *tr = filp->private_data; in tracing_mark_write() local
6718 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_write()
6733 buffer = tr->array_buffer.buffer; in tracing_mark_write()
6751 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { in tracing_mark_write()
6754 tt = event_triggers_call(tr->trace_marker_file, entry, event); in tracing_mark_write()
6768 event_triggers_post_call(tr->trace_marker_file, tt); in tracing_mark_write()
6783 struct trace_array *tr = filp->private_data; in tracing_mark_raw_write() local
6797 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_raw_write()
6814 buffer = tr->array_buffer.buffer; in tracing_mark_raw_write()
6841 struct trace_array *tr = m->private; in tracing_clock_show() local
6847 i == tr->clock_id ? "[" : "", trace_clocks[i].name, in tracing_clock_show()
6848 i == tr->clock_id ? "]" : ""); in tracing_clock_show()
6854 int tracing_set_clock(struct trace_array *tr, const char *clockstr) in tracing_set_clock() argument
6867 tr->clock_id = i; in tracing_set_clock()
6869 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
6875 tracing_reset_online_cpus(&tr->array_buffer); in tracing_set_clock()
6878 if (tr->max_buffer.buffer) in tracing_set_clock()
6879 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
6880 tracing_reset_online_cpus(&tr->max_buffer); in tracing_set_clock()
6892 struct trace_array *tr = m->private; in tracing_clock_write() local
6907 ret = tracing_set_clock(tr, clockstr); in tracing_clock_write()
6918 struct trace_array *tr = inode->i_private; in tracing_clock_open() local
6921 ret = tracing_check_open_get_tr(tr); in tracing_clock_open()
6927 trace_array_put(tr); in tracing_clock_open()
6934 struct trace_array *tr = m->private; in tracing_time_stamp_mode_show() local
6938 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) in tracing_time_stamp_mode_show()
6950 struct trace_array *tr = inode->i_private; in tracing_time_stamp_mode_open() local
6953 ret = tracing_check_open_get_tr(tr); in tracing_time_stamp_mode_open()
6959 trace_array_put(tr); in tracing_time_stamp_mode_open()
6964 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) in tracing_set_time_stamp_abs() argument
6970 if (abs && tr->time_stamp_abs_ref++) in tracing_set_time_stamp_abs()
6974 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { in tracing_set_time_stamp_abs()
6979 if (--tr->time_stamp_abs_ref) in tracing_set_time_stamp_abs()
6983 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs); in tracing_set_time_stamp_abs()
6986 if (tr->max_buffer.buffer) in tracing_set_time_stamp_abs()
6987 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); in tracing_set_time_stamp_abs()
7005 struct trace_array *tr = inode->i_private; in tracing_snapshot_open() local
7010 ret = tracing_check_open_get_tr(tr); in tracing_snapshot_open()
7031 iter->tr = tr; in tracing_snapshot_open()
7032 iter->array_buffer = &tr->max_buffer; in tracing_snapshot_open()
7039 trace_array_put(tr); in tracing_snapshot_open()
7050 struct trace_array *tr = iter->tr; in tracing_snapshot_write() local
7064 if (tr->current_trace->use_max_tr) { in tracing_snapshot_write()
7070 arch_spin_lock(&tr->max_lock); in tracing_snapshot_write()
7071 if (tr->cond_snapshot) in tracing_snapshot_write()
7073 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_write()
7084 if (tr->allocated_snapshot) in tracing_snapshot_write()
7085 free_snapshot(tr); in tracing_snapshot_write()
7095 if (tr->allocated_snapshot) in tracing_snapshot_write()
7096 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_snapshot_write()
7097 &tr->array_buffer, iter->cpu_file); in tracing_snapshot_write()
7099 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_write()
7105 update_max_tr(tr, current, smp_processor_id(), NULL); in tracing_snapshot_write()
7107 update_max_tr_single(tr, current, iter->cpu_file); in tracing_snapshot_write()
7111 if (tr->allocated_snapshot) { in tracing_snapshot_write()
7113 tracing_reset_online_cpus(&tr->max_buffer); in tracing_snapshot_write()
7115 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
7172 info->iter.array_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
7302 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) in get_tracing_log_err() argument
7306 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { in get_tracing_log_err()
7311 tr->n_err_log_entries++; in get_tracing_log_err()
7316 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); in get_tracing_log_err()
7375 void tracing_log_err(struct trace_array *tr, in tracing_log_err() argument
7381 if (!tr) in tracing_log_err()
7382 tr = &global_trace; in tracing_log_err()
7385 err = get_tracing_log_err(tr); in tracing_log_err()
7399 list_add_tail(&err->list, &tr->err_log); in tracing_log_err()
7403 static void clear_tracing_err_log(struct trace_array *tr) in clear_tracing_err_log() argument
7408 list_for_each_entry_safe(err, next, &tr->err_log, list) { in clear_tracing_err_log()
7413 tr->n_err_log_entries = 0; in clear_tracing_err_log()
7419 struct trace_array *tr = m->private; in tracing_err_log_seq_start() local
7423 return seq_list_start(&tr->err_log, *pos); in tracing_err_log_seq_start()
7428 struct trace_array *tr = m->private; in tracing_err_log_seq_next() local
7430 return seq_list_next(v, &tr->err_log, pos); in tracing_err_log_seq_next()
7477 struct trace_array *tr = inode->i_private; in tracing_err_log_open() local
7480 ret = tracing_check_open_get_tr(tr); in tracing_err_log_open()
7486 clear_tracing_err_log(tr); in tracing_err_log_open()
7492 m->private = tr; in tracing_err_log_open()
7494 trace_array_put(tr); in tracing_err_log_open()
7509 struct trace_array *tr = inode->i_private; in tracing_err_log_release() local
7511 trace_array_put(tr); in tracing_err_log_release()
7529 struct trace_array *tr = inode->i_private; in tracing_buffers_open() local
7533 ret = tracing_check_open_get_tr(tr); in tracing_buffers_open()
7539 trace_array_put(tr); in tracing_buffers_open()
7545 info->iter.tr = tr; in tracing_buffers_open()
7547 info->iter.trace = tr->current_trace; in tracing_buffers_open()
7548 info->iter.array_buffer = &tr->array_buffer; in tracing_buffers_open()
7555 tr->trace_ref++; in tracing_buffers_open()
7561 trace_array_put(tr); in tracing_buffers_open()
7588 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
7656 iter->tr->trace_ref--; in tracing_buffers_release()
7658 __trace_array_put(iter->tr); in tracing_buffers_release()
7746 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
7820 ret = wait_on_pipe(iter, iter->tr->buffer_percent); in tracing_buffers_splice_read()
7848 struct trace_array *tr = inode->i_private; in tracing_stats_read() local
7849 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_stats_read()
7874 if (trace_clocks[tr->clock_id].in_ns) { in tracing_stats_read()
7949 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_snapshot() argument
7952 tracing_snapshot_instance(tr); in ftrace_snapshot()
7957 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_count_snapshot() argument
7974 tracing_snapshot_instance(tr); in ftrace_count_snapshot()
8000 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_init() argument
8016 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_free() argument
8044 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, in ftrace_trace_snapshot_callback() argument
8052 if (!tr) in ftrace_trace_snapshot_callback()
8062 return unregister_ftrace_function_probe_func(glob+1, tr, ops); in ftrace_trace_snapshot_callback()
8081 ret = tracing_alloc_snapshot_instance(tr); in ftrace_trace_snapshot_callback()
8085 ret = register_ftrace_function_probe(glob, tr, ops, count); in ftrace_trace_snapshot_callback()
8104 static struct dentry *tracing_get_dentry(struct trace_array *tr) in tracing_get_dentry() argument
8106 if (WARN_ON(!tr->dir)) in tracing_get_dentry()
8110 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_get_dentry()
8114 return tr->dir; in tracing_get_dentry()
8117 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) in tracing_dentry_percpu() argument
8121 if (tr->percpu_dir) in tracing_dentry_percpu()
8122 return tr->percpu_dir; in tracing_dentry_percpu()
8124 d_tracer = tracing_get_dentry(tr); in tracing_dentry_percpu()
8128 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); in tracing_dentry_percpu()
8130 MEM_FAIL(!tr->percpu_dir, in tracing_dentry_percpu()
8133 return tr->percpu_dir; in tracing_dentry_percpu()
8148 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) in tracing_init_tracefs_percpu() argument
8150 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); in tracing_init_tracefs_percpu()
8166 tr, cpu, &tracing_pipe_fops); in tracing_init_tracefs_percpu()
8170 tr, cpu, &tracing_fops); in tracing_init_tracefs_percpu()
8173 tr, cpu, &tracing_buffers_fops); in tracing_init_tracefs_percpu()
8176 tr, cpu, &tracing_stats_fops); in tracing_init_tracefs_percpu()
8179 tr, cpu, &tracing_entries_fops); in tracing_init_tracefs_percpu()
8183 tr, cpu, &snapshot_fops); in tracing_init_tracefs_percpu()
8186 tr, cpu, &snapshot_raw_fops); in tracing_init_tracefs_percpu()
8227 ret = __set_tracer_option(topt->tr, topt->flags, in trace_options_write()
8285 struct trace_array *tr; in trace_options_core_read() local
8289 get_tr_index(tr_index, &tr, &index); in trace_options_core_read()
8291 if (tr->trace_flags & (1 << index)) in trace_options_core_read()
8304 struct trace_array *tr; in trace_options_core_write() local
8309 get_tr_index(tr_index, &tr, &index); in trace_options_core_write()
8320 ret = set_tracer_flag(tr, 1 << index, val); in trace_options_core_write()
8355 static struct dentry *trace_options_init_dentry(struct trace_array *tr) in trace_options_init_dentry() argument
8359 if (tr->options) in trace_options_init_dentry()
8360 return tr->options; in trace_options_init_dentry()
8362 d_tracer = tracing_get_dentry(tr); in trace_options_init_dentry()
8366 tr->options = tracefs_create_dir("options", d_tracer); in trace_options_init_dentry()
8367 if (!tr->options) { in trace_options_init_dentry()
8372 return tr->options; in trace_options_init_dentry()
8376 create_trace_option_file(struct trace_array *tr, in create_trace_option_file() argument
8383 t_options = trace_options_init_dentry(tr); in create_trace_option_file()
8389 topt->tr = tr; in create_trace_option_file()
8397 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) in create_trace_option_files() argument
8418 if (!trace_ok_for_array(tracer, tr)) in create_trace_option_files()
8421 for (i = 0; i < tr->nr_topts; i++) { in create_trace_option_files()
8423 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) in create_trace_option_files()
8436 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), in create_trace_option_files()
8443 tr->topts = tr_topts; in create_trace_option_files()
8444 tr->topts[tr->nr_topts].tracer = tracer; in create_trace_option_files()
8445 tr->topts[tr->nr_topts].topts = topts; in create_trace_option_files()
8446 tr->nr_topts++; in create_trace_option_files()
8449 create_trace_option_file(tr, &topts[cnt], flags, in create_trace_option_files()
8458 create_trace_option_core_file(struct trace_array *tr, in create_trace_option_core_file() argument
8463 t_options = trace_options_init_dentry(tr); in create_trace_option_core_file()
8468 (void *)&tr->trace_flags_index[index], in create_trace_option_core_file()
8472 static void create_trace_options_dir(struct trace_array *tr) in create_trace_options_dir() argument
8475 bool top_level = tr == &global_trace; in create_trace_options_dir()
8478 t_options = trace_options_init_dentry(tr); in create_trace_options_dir()
8485 create_trace_option_core_file(tr, trace_options[i], i); in create_trace_options_dir()
8493 struct trace_array *tr = filp->private_data; in rb_simple_read() local
8497 r = tracer_tracing_is_on(tr); in rb_simple_read()
8507 struct trace_array *tr = filp->private_data; in rb_simple_write() local
8508 struct trace_buffer *buffer = tr->array_buffer.buffer; in rb_simple_write()
8518 if (!!val == tracer_tracing_is_on(tr)) { in rb_simple_write()
8521 tracer_tracing_on(tr); in rb_simple_write()
8522 if (tr->current_trace->start) in rb_simple_write()
8523 tr->current_trace->start(tr); in rb_simple_write()
8525 tracer_tracing_off(tr); in rb_simple_write()
8526 if (tr->current_trace->stop) in rb_simple_write()
8527 tr->current_trace->stop(tr); in rb_simple_write()
8549 struct trace_array *tr = filp->private_data; in buffer_percent_read() local
8553 r = tr->buffer_percent; in buffer_percent_read()
8563 struct trace_array *tr = filp->private_data; in buffer_percent_write() local
8574 tr->buffer_percent = val; in buffer_percent_write()
8592 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8595 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) in allocate_trace_buffer() argument
8599 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; in allocate_trace_buffer()
8601 buf->tr = tr; in allocate_trace_buffer()
8615 set_buffer_entries(&tr->array_buffer, in allocate_trace_buffer()
8616 ring_buffer_size(tr->array_buffer.buffer, 0)); in allocate_trace_buffer()
8621 static int allocate_trace_buffers(struct trace_array *tr, int size) in allocate_trace_buffers() argument
8625 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); in allocate_trace_buffers()
8630 ret = allocate_trace_buffer(tr, &tr->max_buffer, in allocate_trace_buffers()
8633 ring_buffer_free(tr->array_buffer.buffer); in allocate_trace_buffers()
8634 tr->array_buffer.buffer = NULL; in allocate_trace_buffers()
8635 free_percpu(tr->array_buffer.data); in allocate_trace_buffers()
8636 tr->array_buffer.data = NULL; in allocate_trace_buffers()
8639 tr->allocated_snapshot = allocate_snapshot; in allocate_trace_buffers()
8661 static void free_trace_buffers(struct trace_array *tr) in free_trace_buffers() argument
8663 if (!tr) in free_trace_buffers()
8666 free_trace_buffer(&tr->array_buffer); in free_trace_buffers()
8669 free_trace_buffer(&tr->max_buffer); in free_trace_buffers()
8673 static void init_trace_flags_index(struct trace_array *tr) in init_trace_flags_index() argument
8679 tr->trace_flags_index[i] = i; in init_trace_flags_index()
8682 static void __update_tracer_options(struct trace_array *tr) in __update_tracer_options() argument
8687 add_tracer_options(tr, t); in __update_tracer_options()
8690 static void update_tracer_options(struct trace_array *tr) in update_tracer_options() argument
8694 __update_tracer_options(tr); in update_tracer_options()
8701 struct trace_array *tr, *found = NULL; in trace_array_find() local
8703 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_find()
8704 if (tr->name && strcmp(tr->name, instance) == 0) { in trace_array_find()
8705 found = tr; in trace_array_find()
8715 struct trace_array *tr; in trace_array_find_get() local
8718 tr = trace_array_find(instance); in trace_array_find_get()
8719 if (tr) in trace_array_find_get()
8720 tr->ref++; in trace_array_find_get()
8723 return tr; in trace_array_find_get()
8726 static int trace_array_create_dir(struct trace_array *tr) in trace_array_create_dir() argument
8730 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); in trace_array_create_dir()
8731 if (!tr->dir) in trace_array_create_dir()
8734 ret = event_trace_add_tracer(tr->dir, tr); in trace_array_create_dir()
8736 tracefs_remove(tr->dir); in trace_array_create_dir()
8740 init_tracer_tracefs(tr, tr->dir); in trace_array_create_dir()
8741 __update_tracer_options(tr); in trace_array_create_dir()
8748 struct trace_array *tr; in trace_array_create() local
8752 tr = kzalloc(sizeof(*tr), GFP_KERNEL); in trace_array_create()
8753 if (!tr) in trace_array_create()
8756 tr->name = kstrdup(name, GFP_KERNEL); in trace_array_create()
8757 if (!tr->name) in trace_array_create()
8760 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) in trace_array_create()
8763 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; in trace_array_create()
8765 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); in trace_array_create()
8767 raw_spin_lock_init(&tr->start_lock); in trace_array_create()
8769 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in trace_array_create()
8771 tr->current_trace = &nop_trace; in trace_array_create()
8773 INIT_LIST_HEAD(&tr->systems); in trace_array_create()
8774 INIT_LIST_HEAD(&tr->events); in trace_array_create()
8775 INIT_LIST_HEAD(&tr->hist_vars); in trace_array_create()
8776 INIT_LIST_HEAD(&tr->err_log); in trace_array_create()
8778 if (allocate_trace_buffers(tr, trace_buf_size) < 0) in trace_array_create()
8781 if (ftrace_allocate_ftrace_ops(tr) < 0) in trace_array_create()
8784 ftrace_init_trace_array(tr); in trace_array_create()
8786 init_trace_flags_index(tr); in trace_array_create()
8789 ret = trace_array_create_dir(tr); in trace_array_create()
8793 __trace_early_add_events(tr); in trace_array_create()
8795 list_add(&tr->list, &ftrace_trace_arrays); in trace_array_create()
8797 tr->ref++; in trace_array_create()
8799 return tr; in trace_array_create()
8802 ftrace_free_ftrace_ops(tr); in trace_array_create()
8803 free_trace_buffers(tr); in trace_array_create()
8804 free_cpumask_var(tr->tracing_cpumask); in trace_array_create()
8805 kfree(tr->name); in trace_array_create()
8806 kfree(tr); in trace_array_create()
8813 struct trace_array *tr; in instance_mkdir() local
8823 tr = trace_array_create(name); in instance_mkdir()
8825 ret = PTR_ERR_OR_ZERO(tr); in instance_mkdir()
8851 struct trace_array *tr; in trace_array_get_by_name() local
8856 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get_by_name()
8857 if (tr->name && strcmp(tr->name, name) == 0) in trace_array_get_by_name()
8861 tr = trace_array_create(name); in trace_array_get_by_name()
8863 if (IS_ERR(tr)) in trace_array_get_by_name()
8864 tr = NULL; in trace_array_get_by_name()
8866 if (tr) in trace_array_get_by_name()
8867 tr->ref++; in trace_array_get_by_name()
8871 return tr; in trace_array_get_by_name()
8875 static int __remove_instance(struct trace_array *tr) in __remove_instance() argument
8880 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) in __remove_instance()
8883 list_del(&tr->list); in __remove_instance()
8888 set_tracer_flag(tr, 1 << i, 0); in __remove_instance()
8891 tracing_set_nop(tr); in __remove_instance()
8892 clear_ftrace_function_probes(tr); in __remove_instance()
8893 event_trace_del_tracer(tr); in __remove_instance()
8894 ftrace_clear_pids(tr); in __remove_instance()
8895 ftrace_destroy_function_files(tr); in __remove_instance()
8896 tracefs_remove(tr->dir); in __remove_instance()
8897 free_trace_buffers(tr); in __remove_instance()
8898 clear_tracing_err_log(tr); in __remove_instance()
8900 for (i = 0; i < tr->nr_topts; i++) { in __remove_instance()
8901 kfree(tr->topts[i].topts); in __remove_instance()
8903 kfree(tr->topts); in __remove_instance()
8905 free_cpumask_var(tr->tracing_cpumask); in __remove_instance()
8906 kfree(tr->name); in __remove_instance()
8907 kfree(tr); in __remove_instance()
8914 struct trace_array *tr; in trace_array_destroy() local
8926 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_destroy()
8927 if (tr == this_tr) { in trace_array_destroy()
8928 ret = __remove_instance(tr); in trace_array_destroy()
8942 struct trace_array *tr; in instance_rmdir() local
8949 tr = trace_array_find(name); in instance_rmdir()
8950 if (tr) in instance_rmdir()
8951 ret = __remove_instance(tr); in instance_rmdir()
8961 struct trace_array *tr; in create_trace_instances() local
8972 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in create_trace_instances()
8973 if (!tr->name) in create_trace_instances()
8975 if (MEM_FAIL(trace_array_create_dir(tr) < 0, in create_trace_instances()
8985 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) in init_tracer_tracefs() argument
8991 tr, &show_traces_fops); in init_tracer_tracefs()
8994 tr, &set_tracer_fops); in init_tracer_tracefs()
8997 tr, &tracing_cpumask_fops); in init_tracer_tracefs()
9000 tr, &tracing_iter_fops); in init_tracer_tracefs()
9003 tr, &tracing_fops); in init_tracer_tracefs()
9006 tr, &tracing_pipe_fops); in init_tracer_tracefs()
9009 tr, &tracing_entries_fops); in init_tracer_tracefs()
9012 tr, &tracing_total_entries_fops); in init_tracer_tracefs()
9015 tr, &tracing_free_buffer_fops); in init_tracer_tracefs()
9018 tr, &tracing_mark_fops); in init_tracer_tracefs()
9020 file = __find_event_file(tr, "ftrace", "print"); in init_tracer_tracefs()
9024 tr->trace_marker_file = file; in init_tracer_tracefs()
9027 tr, &tracing_mark_raw_fops); in init_tracer_tracefs()
9029 trace_create_file("trace_clock", 0644, d_tracer, tr, in init_tracer_tracefs()
9033 tr, &rb_simple_fops); in init_tracer_tracefs()
9035 trace_create_file("timestamp_mode", 0444, d_tracer, tr, in init_tracer_tracefs()
9038 tr->buffer_percent = 50; in init_tracer_tracefs()
9041 tr, &buffer_percent_fops); in init_tracer_tracefs()
9043 create_trace_options_dir(tr); in init_tracer_tracefs()
9046 trace_create_maxlat_file(tr, d_tracer); in init_tracer_tracefs()
9049 if (ftrace_create_function_files(tr, d_tracer)) in init_tracer_tracefs()
9054 tr, &snapshot_fops); in init_tracer_tracefs()
9058 tr, &tracing_err_log_fops); in init_tracer_tracefs()
9061 tracing_init_tracefs_percpu(tr, cpu); in init_tracer_tracefs()
9063 ftrace_init_tracefs(tr, d_tracer); in init_tracer_tracefs()
9097 struct trace_array *tr = &global_trace; in tracing_init_dentry() local
9105 if (tr->dir) in tracing_init_dentry()
9117 tr->dir = debugfs_create_automount("tracing", NULL, in tracing_init_dentry()
9327 iter->tr = &global_trace; in trace_init_global_iter()
9328 iter->trace = iter->tr->current_trace; in trace_init_global_iter()
9340 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_global_iter()
9349 struct trace_array *tr = &global_trace; in ftrace_dump() local
9383 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
9386 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
9445 tr->trace_flags |= old_userobj; in ftrace_dump()
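
Several of the functions listed above (trace_array_get_by_name(), trace_array_init_printk(), trace_array_printk(), trace_array_put(), trace_array_destroy()) form the trace-array instance API that other kernel code can call. As a rough, hedged illustration only — the instance name "example-instance", the init/exit wrappers and the error codes below are made up, and the calls are assumed to keep the signatures they have in this version of the file (trace_array_get_by_name() taking only a name) — a module might drive them like this:

/*
 * Minimal sketch, not taken from this file: exercising the instance
 * API referenced in the listing above from a loadable module.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/trace.h>

static struct trace_array *tr;

static int __init example_init(void)
{
	/* Look up or create the named instance and take a reference on it. */
	tr = trace_array_get_by_name("example-instance");
	if (!tr)
		return -ENOMEM;

	/* Allocate the per-instance buffers that trace_array_printk() needs. */
	if (trace_array_init_printk(tr)) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	/* Write into this instance's ring buffer rather than the global one. */
	trace_array_printk(tr, _THIS_IP_, "%s\n", "example module loaded");
	return 0;
}

static void __exit example_exit(void)
{
	trace_array_printk(tr, _THIS_IP_, "%s\n", "example module unloading");
	/* Drop our reference first, then remove the instance if unused. */
	trace_array_put(tr);
	trace_array_destroy(tr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

trace_array_destroy() can only remove the instance once the reference taken by trace_array_get_by_name() has been dropped, which is why trace_array_put() comes first (compare __remove_instance() above bailing out when tr->ref > 1).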