Lines matching refs:iter in kernel/trace/trace.c
1903 static int wait_on_pipe(struct trace_iterator *iter, int full) in wait_on_pipe() argument
1908 if (trace_buffer_iter(iter, iter->cpu_file)) in wait_on_pipe()
1911 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full); in wait_on_pipe()
1918 if (iter->snapshot) in wait_on_pipe()
1919 iter->array_buffer = &iter->tr->max_buffer; in wait_on_pipe()
2807 struct trace_iterator *iter = tracepoint_print_iter; in output_printk() local
2810 if (WARN_ON_ONCE(!iter)) in output_printk()
2827 trace_seq_init(&iter->seq); in output_printk()
2828 iter->ent = fbuffer->entry; in output_printk()
2829 event_call->event.funcs->trace(iter, 0, event); in output_printk()
2830 trace_seq_putc(&iter->seq, 0); in output_printk()
2831 printk("%s", iter->seq.buffer); in output_printk()
3478 static void trace_iterator_increment(struct trace_iterator *iter) in trace_iterator_increment() argument
3480 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); in trace_iterator_increment()
3482 iter->idx++; in trace_iterator_increment()
3488 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, in peek_next_entry() argument
3492 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); in peek_next_entry()
3500 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, in peek_next_entry()
3505 iter->ent_size = ring_buffer_event_length(event); in peek_next_entry()
3508 iter->ent_size = 0; in peek_next_entry()
3513 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, in __find_next_entry() argument
3516 struct trace_buffer *buffer = iter->array_buffer->buffer; in __find_next_entry()
3519 int cpu_file = iter->cpu_file; in __find_next_entry()
3532 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); in __find_next_entry()
3544 ent = peek_next_entry(iter, cpu, &ts, &lost_events); in __find_next_entry()
3554 next_size = iter->ent_size; in __find_next_entry()
3558 iter->ent_size = next_size; in __find_next_entry()
3576 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, in trace_find_next_entry() argument
3580 int ent_size = iter->ent_size; in trace_find_next_entry()
3591 if (iter->temp == static_temp_buf && in trace_find_next_entry()
3600 if (iter->ent && iter->ent != iter->temp) { in trace_find_next_entry()
3601 if ((!iter->temp || iter->temp_size < iter->ent_size) && in trace_find_next_entry()
3602 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { in trace_find_next_entry()
3604 temp = kmalloc(iter->ent_size, GFP_KERNEL); in trace_find_next_entry()
3607 kfree(iter->temp); in trace_find_next_entry()
3608 iter->temp = temp; in trace_find_next_entry()
3609 iter->temp_size = iter->ent_size; in trace_find_next_entry()
3611 memcpy(iter->temp, iter->ent, iter->ent_size); in trace_find_next_entry()
3612 iter->ent = iter->temp; in trace_find_next_entry()
3614 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); in trace_find_next_entry()
3616 iter->ent_size = ent_size; in trace_find_next_entry()
3622 void *trace_find_next_entry_inc(struct trace_iterator *iter) in trace_find_next_entry_inc() argument
3624 iter->ent = __find_next_entry(iter, &iter->cpu, in trace_find_next_entry_inc()
3625 &iter->lost_events, &iter->ts); in trace_find_next_entry_inc()
3627 if (iter->ent) in trace_find_next_entry_inc()
3628 trace_iterator_increment(iter); in trace_find_next_entry_inc()
3630 return iter->ent ? iter : NULL; in trace_find_next_entry_inc()
3633 static void trace_consume(struct trace_iterator *iter) in trace_consume() argument
3635 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, in trace_consume()
3636 &iter->lost_events); in trace_consume()
3641 struct trace_iterator *iter = m->private; in s_next() local
3645 WARN_ON_ONCE(iter->leftover); in s_next()
3650 if (iter->idx > i) in s_next()
3653 if (iter->idx < 0) in s_next()
3654 ent = trace_find_next_entry_inc(iter); in s_next()
3656 ent = iter; in s_next()
3658 while (ent && iter->idx < i) in s_next()
3659 ent = trace_find_next_entry_inc(iter); in s_next()
3661 iter->pos = *pos; in s_next()
3666 void tracing_iter_reset(struct trace_iterator *iter, int cpu) in tracing_iter_reset() argument
3672 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; in tracing_iter_reset()
3674 buf_iter = trace_buffer_iter(iter, cpu); in tracing_iter_reset()
3686 if (ts >= iter->array_buffer->time_start) in tracing_iter_reset()
3692 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
3701 struct trace_iterator *iter = m->private; in s_start() local
3702 struct trace_array *tr = iter->tr; in s_start()
3703 int cpu_file = iter->cpu_file; in s_start()
3715 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) { in s_start()
3717 if (iter->trace->close) in s_start()
3718 iter->trace->close(iter); in s_start()
3719 *iter->trace = *tr->current_trace; in s_start()
3721 if (iter->trace->open) in s_start()
3722 iter->trace->open(iter); in s_start()
3727 if (iter->snapshot && iter->trace->use_max_tr) in s_start()
3731 if (*pos != iter->pos) { in s_start()
3732 iter->ent = NULL; in s_start()
3733 iter->cpu = 0; in s_start()
3734 iter->idx = -1; in s_start()
3738 tracing_iter_reset(iter, cpu); in s_start()
3740 tracing_iter_reset(iter, cpu_file); in s_start()
3742 iter->leftover = 0; in s_start()
3743 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) in s_start()
3751 if (iter->leftover) in s_start()
3752 p = iter; in s_start()
3766 struct trace_iterator *iter = m->private; in s_stop() local
3769 if (iter->snapshot && iter->trace->use_max_tr) in s_stop()
3773 trace_access_unlock(iter->cpu_file); in s_stop()
3893 print_trace_header(struct seq_file *m, struct trace_iterator *iter) in print_trace_header() argument
3896 struct array_buffer *buf = iter->array_buffer; in print_trace_header()
3898 struct tracer *type = iter->trace; in print_trace_header()
3945 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); in print_trace_header()
3946 trace_print_seq(m, &iter->seq); in print_trace_header()
3948 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); in print_trace_header()
3949 trace_print_seq(m, &iter->seq); in print_trace_header()
3956 static void test_cpu_buff_start(struct trace_iterator *iter) in test_cpu_buff_start() argument
3958 struct trace_seq *s = &iter->seq; in test_cpu_buff_start()
3959 struct trace_array *tr = iter->tr; in test_cpu_buff_start()
3964 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) in test_cpu_buff_start()
3967 if (cpumask_available(iter->started) && in test_cpu_buff_start()
3968 cpumask_test_cpu(iter->cpu, iter->started)) in test_cpu_buff_start()
3971 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) in test_cpu_buff_start()
3974 if (cpumask_available(iter->started)) in test_cpu_buff_start()
3975 cpumask_set_cpu(iter->cpu, iter->started); in test_cpu_buff_start()
3978 if (iter->idx > 1) in test_cpu_buff_start()
3980 iter->cpu); in test_cpu_buff_start()
3983 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) in print_trace_fmt() argument
3985 struct trace_array *tr = iter->tr; in print_trace_fmt()
3986 struct trace_seq *s = &iter->seq; in print_trace_fmt()
3991 entry = iter->ent; in print_trace_fmt()
3993 test_cpu_buff_start(iter); in print_trace_fmt()
3998 if (iter->iter_flags & TRACE_FILE_LAT_FMT) in print_trace_fmt()
3999 trace_print_lat_context(iter); in print_trace_fmt()
4001 trace_print_context(iter); in print_trace_fmt()
4008 return event->funcs->trace(iter, sym_flags, event); in print_trace_fmt()
4015 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) in print_raw_fmt() argument
4017 struct trace_array *tr = iter->tr; in print_raw_fmt()
4018 struct trace_seq *s = &iter->seq; in print_raw_fmt()
4022 entry = iter->ent; in print_raw_fmt()
4026 entry->pid, iter->cpu, iter->ts); in print_raw_fmt()
4033 return event->funcs->raw(iter, 0, event); in print_raw_fmt()
4040 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) in print_hex_fmt() argument
4042 struct trace_array *tr = iter->tr; in print_hex_fmt()
4043 struct trace_seq *s = &iter->seq; in print_hex_fmt()
4048 entry = iter->ent; in print_hex_fmt()
4052 SEQ_PUT_HEX_FIELD(s, iter->cpu); in print_hex_fmt()
4053 SEQ_PUT_HEX_FIELD(s, iter->ts); in print_hex_fmt()
4060 enum print_line_t ret = event->funcs->hex(iter, 0, event); in print_hex_fmt()
4070 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) in print_bin_fmt() argument
4072 struct trace_array *tr = iter->tr; in print_bin_fmt()
4073 struct trace_seq *s = &iter->seq; in print_bin_fmt()
4077 entry = iter->ent; in print_bin_fmt()
4081 SEQ_PUT_FIELD(s, iter->cpu); in print_bin_fmt()
4082 SEQ_PUT_FIELD(s, iter->ts); in print_bin_fmt()
4088 return event ? event->funcs->binary(iter, 0, event) : in print_bin_fmt()
4092 int trace_empty(struct trace_iterator *iter) in trace_empty() argument
4098 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in trace_empty()
4099 cpu = iter->cpu_file; in trace_empty()
4100 buf_iter = trace_buffer_iter(iter, cpu); in trace_empty()
4105 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) in trace_empty()
4112 buf_iter = trace_buffer_iter(iter, cpu); in trace_empty()
4117 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) in trace_empty()
4126 enum print_line_t print_trace_line(struct trace_iterator *iter) in print_trace_line() argument
4128 struct trace_array *tr = iter->tr; in print_trace_line()
4132 if (iter->lost_events) { in print_trace_line()
4133 if (iter->lost_events == (unsigned long)-1) in print_trace_line()
4134 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", in print_trace_line()
4135 iter->cpu); in print_trace_line()
4137 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", in print_trace_line()
4138 iter->cpu, iter->lost_events); in print_trace_line()
4139 if (trace_seq_has_overflowed(&iter->seq)) in print_trace_line()
4143 if (iter->trace && iter->trace->print_line) { in print_trace_line()
4144 ret = iter->trace->print_line(iter); in print_trace_line()
4149 if (iter->ent->type == TRACE_BPUTS && in print_trace_line()
4152 return trace_print_bputs_msg_only(iter); in print_trace_line()
4154 if (iter->ent->type == TRACE_BPRINT && in print_trace_line()
4157 return trace_print_bprintk_msg_only(iter); in print_trace_line()
4159 if (iter->ent->type == TRACE_PRINT && in print_trace_line()
4162 return trace_print_printk_msg_only(iter); in print_trace_line()
4165 return print_bin_fmt(iter); in print_trace_line()
4168 return print_hex_fmt(iter); in print_trace_line()
4171 return print_raw_fmt(iter); in print_trace_line()
4173 return print_trace_fmt(iter); in print_trace_line()
4178 struct trace_iterator *iter = m->private; in trace_latency_header() local
4179 struct trace_array *tr = iter->tr; in trace_latency_header()
4182 if (trace_empty(iter)) in trace_latency_header()
4185 if (iter->iter_flags & TRACE_FILE_LAT_FMT) in trace_latency_header()
4186 print_trace_header(m, iter); in trace_latency_header()
4194 struct trace_iterator *iter = m->private; in trace_default_header() local
4195 struct trace_array *tr = iter->tr; in trace_default_header()
4201 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { in trace_default_header()
4203 if (trace_empty(iter)) in trace_default_header()
4205 print_trace_header(m, iter); in trace_default_header()
4211 print_func_help_header_irq(iter->array_buffer, in trace_default_header()
4214 print_func_help_header(iter->array_buffer, m, in trace_default_header()
4254 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) in print_snapshot_help() argument
4256 if (iter->tr->allocated_snapshot) in print_snapshot_help()
4262 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in print_snapshot_help()
4269 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } in print_snapshot_help() argument
4274 struct trace_iterator *iter = v; in s_show() local
4277 if (iter->ent == NULL) { in s_show()
4278 if (iter->tr) { in s_show()
4279 seq_printf(m, "# tracer: %s\n", iter->trace->name); in s_show()
4283 if (iter->snapshot && trace_empty(iter)) in s_show()
4284 print_snapshot_help(m, iter); in s_show()
4285 else if (iter->trace && iter->trace->print_header) in s_show()
4286 iter->trace->print_header(m); in s_show()
4290 } else if (iter->leftover) { in s_show()
4295 ret = trace_print_seq(m, &iter->seq); in s_show()
4298 iter->leftover = ret; in s_show()
4301 ret = print_trace_line(iter); in s_show()
4303 iter->seq.full = 0; in s_show()
4304 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); in s_show()
4306 ret = trace_print_seq(m, &iter->seq); in s_show()
4314 iter->leftover = ret; in s_show()
4342 struct trace_iterator *iter; in __tracing_open() local
4348 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); in __tracing_open()
4349 if (!iter) in __tracing_open()
4352 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), in __tracing_open()
4354 if (!iter->buffer_iter) in __tracing_open()
4365 iter->temp = kmalloc(128, GFP_KERNEL); in __tracing_open()
4366 if (iter->temp) in __tracing_open()
4367 iter->temp_size = 128; in __tracing_open()
4374 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); in __tracing_open()
4375 if (!iter->trace) in __tracing_open()
4378 *iter->trace = *tr->current_trace; in __tracing_open()
4380 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) in __tracing_open()
4383 iter->tr = tr; in __tracing_open()
4388 iter->array_buffer = &tr->max_buffer; in __tracing_open()
4391 iter->array_buffer = &tr->array_buffer; in __tracing_open()
4392 iter->snapshot = snapshot; in __tracing_open()
4393 iter->pos = -1; in __tracing_open()
4394 iter->cpu_file = tracing_get_cpu(inode); in __tracing_open()
4395 mutex_init(&iter->mutex); in __tracing_open()
4398 if (iter->trace->open) in __tracing_open()
4399 iter->trace->open(iter); in __tracing_open()
4402 if (ring_buffer_overruns(iter->array_buffer->buffer)) in __tracing_open()
4403 iter->iter_flags |= TRACE_FILE_ANNOTATE; in __tracing_open()
4407 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in __tracing_open()
4413 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) in __tracing_open()
4416 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { in __tracing_open()
4418 iter->buffer_iter[cpu] = in __tracing_open()
4419 ring_buffer_read_prepare(iter->array_buffer->buffer, in __tracing_open()
4424 ring_buffer_read_start(iter->buffer_iter[cpu]); in __tracing_open()
4425 tracing_iter_reset(iter, cpu); in __tracing_open()
4428 cpu = iter->cpu_file; in __tracing_open()
4429 iter->buffer_iter[cpu] = in __tracing_open()
4430 ring_buffer_read_prepare(iter->array_buffer->buffer, in __tracing_open()
4433 ring_buffer_read_start(iter->buffer_iter[cpu]); in __tracing_open()
4434 tracing_iter_reset(iter, cpu); in __tracing_open()
4439 return iter; in __tracing_open()
4443 kfree(iter->trace); in __tracing_open()
4444 kfree(iter->temp); in __tracing_open()
4445 kfree(iter->buffer_iter); in __tracing_open()
4517 struct trace_iterator *iter; in tracing_release() local
4526 iter = m->private; in tracing_release()
4530 if (iter->buffer_iter[cpu]) in tracing_release()
4531 ring_buffer_read_finish(iter->buffer_iter[cpu]); in tracing_release()
4534 if (iter->trace && iter->trace->close) in tracing_release()
4535 iter->trace->close(iter); in tracing_release()
4537 if (!iter->snapshot && tr->stop_count) in tracing_release()
4545 mutex_destroy(&iter->mutex); in tracing_release()
4546 free_cpumask_var(iter->started); in tracing_release()
4547 kfree(iter->temp); in tracing_release()
4548 kfree(iter->trace); in tracing_release()
4549 kfree(iter->buffer_iter); in tracing_release()
4575 struct trace_iterator *iter; in tracing_open() local
4599 iter = __tracing_open(inode, file, false); in tracing_open()
4600 if (IS_ERR(iter)) in tracing_open()
4601 ret = PTR_ERR(iter); in tracing_open()
4603 iter->iter_flags |= TRACE_FILE_LAT_FMT; in tracing_open()
6205 struct trace_iterator *iter; in tracing_open_pipe() local
6215 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in tracing_open_pipe()
6216 if (!iter) { in tracing_open_pipe()
6222 trace_seq_init(&iter->seq); in tracing_open_pipe()
6223 iter->trace = tr->current_trace; in tracing_open_pipe()
6225 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { in tracing_open_pipe()
6231 cpumask_setall(iter->started); in tracing_open_pipe()
6234 iter->iter_flags |= TRACE_FILE_LAT_FMT; in tracing_open_pipe()
6238 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in tracing_open_pipe()
6240 iter->tr = tr; in tracing_open_pipe()
6241 iter->array_buffer = &tr->array_buffer; in tracing_open_pipe()
6242 iter->cpu_file = tracing_get_cpu(inode); in tracing_open_pipe()
6243 mutex_init(&iter->mutex); in tracing_open_pipe()
6244 filp->private_data = iter; in tracing_open_pipe()
6246 if (iter->trace->pipe_open) in tracing_open_pipe()
6247 iter->trace->pipe_open(iter); in tracing_open_pipe()
6257 kfree(iter); in tracing_open_pipe()
6265 struct trace_iterator *iter = file->private_data; in tracing_release_pipe() local
6272 if (iter->trace->pipe_close) in tracing_release_pipe()
6273 iter->trace->pipe_close(iter); in tracing_release_pipe()
6277 free_cpumask_var(iter->started); in tracing_release_pipe()
6278 kfree(iter->temp); in tracing_release_pipe()
6279 mutex_destroy(&iter->mutex); in tracing_release_pipe()
6280 kfree(iter); in tracing_release_pipe()
6288 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) in trace_poll() argument
6290 struct trace_array *tr = iter->tr; in trace_poll()
6293 if (trace_buffer_iter(iter, iter->cpu_file)) in trace_poll()
6302 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, in trace_poll()
6303 filp, poll_table, iter->tr->buffer_percent); in trace_poll()
6309 struct trace_iterator *iter = filp->private_data; in tracing_poll_pipe() local
6311 return trace_poll(iter, filp, poll_table); in tracing_poll_pipe()
6317 struct trace_iterator *iter = filp->private_data; in tracing_wait_pipe() local
6320 while (trace_empty(iter)) { in tracing_wait_pipe()
6335 if (!tracer_tracing_is_on(iter->tr) && iter->pos) in tracing_wait_pipe()
6338 mutex_unlock(&iter->mutex); in tracing_wait_pipe()
6340 ret = wait_on_pipe(iter, 0); in tracing_wait_pipe()
6342 mutex_lock(&iter->mutex); in tracing_wait_pipe()
6358 struct trace_iterator *iter = filp->private_data; in tracing_read_pipe() local
6366 mutex_lock(&iter->mutex); in tracing_read_pipe()
6369 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); in tracing_read_pipe()
6373 trace_seq_init(&iter->seq); in tracing_read_pipe()
6375 if (iter->trace->read) { in tracing_read_pipe()
6376 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); in tracing_read_pipe()
6387 if (trace_empty(iter)) { in tracing_read_pipe()
6396 memset(&iter->seq, 0, in tracing_read_pipe()
6399 cpumask_clear(iter->started); in tracing_read_pipe()
6400 trace_seq_init(&iter->seq); in tracing_read_pipe()
6401 iter->pos = -1; in tracing_read_pipe()
6404 trace_access_lock(iter->cpu_file); in tracing_read_pipe()
6405 while (trace_find_next_entry_inc(iter) != NULL) { in tracing_read_pipe()
6407 int save_len = iter->seq.seq.len; in tracing_read_pipe()
6409 ret = print_trace_line(iter); in tracing_read_pipe()
6418 iter->seq.full = 0; in tracing_read_pipe()
6419 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); in tracing_read_pipe()
6420 trace_consume(iter); in tracing_read_pipe()
6425 iter->seq.seq.len = save_len; in tracing_read_pipe()
6429 trace_consume(iter); in tracing_read_pipe()
6431 if (trace_seq_used(&iter->seq) >= cnt) in tracing_read_pipe()
6439 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", in tracing_read_pipe()
6440 iter->ent->type); in tracing_read_pipe()
6442 trace_access_unlock(iter->cpu_file); in tracing_read_pipe()
6446 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); in tracing_read_pipe()
6447 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) in tracing_read_pipe()
6448 trace_seq_init(&iter->seq); in tracing_read_pipe()
6458 mutex_unlock(&iter->mutex); in tracing_read_pipe()
6470 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) in tracing_fill_pipe_page() argument
6478 save_len = iter->seq.seq.len; in tracing_fill_pipe_page()
6479 ret = print_trace_line(iter); in tracing_fill_pipe_page()
6481 if (trace_seq_has_overflowed(&iter->seq)) { in tracing_fill_pipe_page()
6482 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
6492 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
6496 count = trace_seq_used(&iter->seq) - save_len; in tracing_fill_pipe_page()
6499 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
6504 trace_consume(iter); in tracing_fill_pipe_page()
6506 if (!trace_find_next_entry_inc(iter)) { in tracing_fill_pipe_page()
6508 iter->ent = NULL; in tracing_fill_pipe_page()
6524 struct trace_iterator *iter = filp->private_data; in tracing_splice_read_pipe() local
6540 mutex_lock(&iter->mutex); in tracing_splice_read_pipe()
6542 if (iter->trace->splice_read) { in tracing_splice_read_pipe()
6543 ret = iter->trace->splice_read(iter, filp, in tracing_splice_read_pipe()
6553 if (!iter->ent && !trace_find_next_entry_inc(iter)) { in tracing_splice_read_pipe()
6559 trace_access_lock(iter->cpu_file); in tracing_splice_read_pipe()
6567 rem = tracing_fill_pipe_page(rem, iter); in tracing_splice_read_pipe()
6570 ret = trace_seq_to_buffer(&iter->seq, in tracing_splice_read_pipe()
6572 trace_seq_used(&iter->seq)); in tracing_splice_read_pipe()
6578 spd.partial[i].len = trace_seq_used(&iter->seq); in tracing_splice_read_pipe()
6580 trace_seq_init(&iter->seq); in tracing_splice_read_pipe()
6583 trace_access_unlock(iter->cpu_file); in tracing_splice_read_pipe()
6585 mutex_unlock(&iter->mutex); in tracing_splice_read_pipe()
6598 mutex_unlock(&iter->mutex); in tracing_splice_read_pipe()
7031 struct trace_iterator iter; member
7041 struct trace_iterator *iter; in tracing_snapshot_open() local
7050 iter = __tracing_open(inode, file, true); in tracing_snapshot_open()
7051 if (IS_ERR(iter)) in tracing_snapshot_open()
7052 ret = PTR_ERR(iter); in tracing_snapshot_open()
7059 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in tracing_snapshot_open()
7060 if (!iter) { in tracing_snapshot_open()
7066 iter->tr = tr; in tracing_snapshot_open()
7067 iter->array_buffer = &tr->max_buffer; in tracing_snapshot_open()
7068 iter->cpu_file = tracing_get_cpu(inode); in tracing_snapshot_open()
7069 m->private = iter; in tracing_snapshot_open()
7089 struct trace_iterator *iter = m->private; in tracing_snapshot_write() local
7090 struct trace_array *tr = iter->tr; in tracing_snapshot_write()
7120 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in tracing_snapshot_write()
7130 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in tracing_snapshot_write()
7137 &tr->array_buffer, iter->cpu_file); in tracing_snapshot_write()
7143 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { in tracing_snapshot_write()
7148 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, in tracing_snapshot_write()
7154 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in tracing_snapshot_write()
7157 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
7208 if (info->iter.trace->use_max_tr) { in snapshot_raw_open()
7213 info->iter.snapshot = true; in snapshot_raw_open()
7214 info->iter.array_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
7588 info->iter.tr = tr; in tracing_buffers_open()
7589 info->iter.cpu_file = tracing_get_cpu(inode); in tracing_buffers_open()
7590 info->iter.trace = tr->current_trace; in tracing_buffers_open()
7591 info->iter.array_buffer = &tr->array_buffer; in tracing_buffers_open()
7613 struct trace_iterator *iter = &info->iter; in tracing_buffers_poll() local
7615 return trace_poll(iter, filp, poll_table); in tracing_buffers_poll()
7623 struct trace_iterator *iter = &info->iter; in tracing_buffers_read() local
7631 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
7636 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, in tracing_buffers_read()
7637 iter->cpu_file); in tracing_buffers_read()
7642 info->spare_cpu = iter->cpu_file; in tracing_buffers_read()
7653 trace_access_lock(iter->cpu_file); in tracing_buffers_read()
7654 ret = ring_buffer_read_page(iter->array_buffer->buffer, in tracing_buffers_read()
7657 iter->cpu_file, 0); in tracing_buffers_read()
7658 trace_access_unlock(iter->cpu_file); in tracing_buffers_read()
7661 if (trace_empty(iter)) { in tracing_buffers_read()
7665 ret = wait_on_pipe(iter, 0); in tracing_buffers_read()
7695 struct trace_iterator *iter = &info->iter; in tracing_buffers_release() local
7699 iter->tr->trace_ref--; in tracing_buffers_release()
7701 __trace_array_put(iter->tr); in tracing_buffers_release()
7704 ring_buffer_free_read_page(iter->array_buffer->buffer, in tracing_buffers_release()
7774 struct trace_iterator *iter = &info->iter; in tracing_buffers_splice_read() local
7789 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
7806 trace_access_lock(iter->cpu_file); in tracing_buffers_splice_read()
7807 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
7820 ref->buffer = iter->array_buffer->buffer; in tracing_buffers_splice_read()
7821 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); in tracing_buffers_splice_read()
7828 ref->cpu = iter->cpu_file; in tracing_buffers_splice_read()
7831 len, iter->cpu_file, 1); in tracing_buffers_splice_read()
7848 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
7851 trace_access_unlock(iter->cpu_file); in tracing_buffers_splice_read()
7863 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent); in tracing_buffers_splice_read()
9417 void trace_init_global_iter(struct trace_iterator *iter) in trace_init_global_iter() argument
9419 iter->tr = &global_trace; in trace_init_global_iter()
9420 iter->trace = iter->tr->current_trace; in trace_init_global_iter()
9421 iter->cpu_file = RING_BUFFER_ALL_CPUS; in trace_init_global_iter()
9422 iter->array_buffer = &global_trace.array_buffer; in trace_init_global_iter()
9424 if (iter->trace && iter->trace->open) in trace_init_global_iter()
9425 iter->trace->open(iter); in trace_init_global_iter()
9428 if (ring_buffer_overruns(iter->array_buffer->buffer)) in trace_init_global_iter()
9429 iter->iter_flags |= TRACE_FILE_ANNOTATE; in trace_init_global_iter()
9432 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_global_iter()
9433 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in trace_init_global_iter()
9439 static struct trace_iterator iter; in ftrace_dump() local
9468 trace_init_global_iter(&iter); in ftrace_dump()
9470 iter.temp = static_temp_buf; in ftrace_dump()
9471 iter.temp_size = STATIC_TEMP_BUF_SIZE; in ftrace_dump()
9474 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); in ftrace_dump()
9475 size = ring_buffer_size(iter.array_buffer->buffer, cpu); in ftrace_dump()
9489 iter.cpu_file = RING_BUFFER_ALL_CPUS; in ftrace_dump()
9492 iter.cpu_file = raw_smp_processor_id(); in ftrace_dump()
9498 iter.cpu_file = RING_BUFFER_ALL_CPUS; in ftrace_dump()
9516 while (!trace_empty(&iter)) { in ftrace_dump()
9524 trace_iterator_reset(&iter); in ftrace_dump()
9527 iter.iter_flags |= TRACE_FILE_LAT_FMT; in ftrace_dump()
9529 if (trace_find_next_entry_inc(&iter) != NULL) { in ftrace_dump()
9532 ret = print_trace_line(&iter); in ftrace_dump()
9534 trace_consume(&iter); in ftrace_dump()
9538 trace_printk_seq(&iter.seq); in ftrace_dump()
9550 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); in ftrace_dump()