Lines matching refs: ring_buffer_per_cpu
458 struct ring_buffer_per_cpu {
515 struct ring_buffer_per_cpu **buffers;
527 struct ring_buffer_per_cpu *cpu_buffer;
689 static inline bool rb_has_ext_writer(struct ring_buffer_per_cpu *cpu_buffer) in rb_has_ext_writer()
702 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
729 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
776 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
852 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
897 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters()
943 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait()
1061 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait()
1115 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1116 struct ring_buffer_per_cpu *__b = \
1302 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate()
1327 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate()
1338 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set()
1361 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update()
1370 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head()
1379 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal()
1396 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page()
1451 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update()
1512 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage()
1530 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages()
1556 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages()
1630 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages()
1655 static struct ring_buffer_per_cpu *
1658 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer()
1716 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer()
1881 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1894 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages()
2007 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages()
2083 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages()
2099 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler()
2100 struct ring_buffer_per_cpu, update_pages_work); in update_pages_handler()
2118 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize()
2328 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event()
2400 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index()
2415 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
2441 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page()
2596 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail()
2672 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2678 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail()
2810 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp()
2828 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp()
2881 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event()
2941 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard()
3006 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit()
3013 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write()
3069 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit()
3114 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit()
3122 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups()
3226 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock()
3253 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock()
3277 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start()
3297 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end()
3320 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit()
3393 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
3481 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
3489 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next()
3638 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event()
3730 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve()
3777 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry()
3832 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit()
3882 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write()
3933 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty()
4088 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu()
4108 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu()
4125 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries()
4139 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts()
4171 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu()
4191 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu()
4210 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu()
4233 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu()
4255 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu()
4276 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu()
4295 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries()
4318 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns()
4334 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
4362 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset()
4382 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty()
4425 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp()
4535 noinline rb_swap_reader_page_ext(struct ring_buffer_per_cpu *cpu_buffer) in rb_swap_reader_page_ext()
4594 rb_swap_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_swap_reader_page()
4683 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page()
4777 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader()
4803 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter()
4830 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events()
4836 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek()
4913 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek()
4999 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock()
5024 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock()
5045 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5093 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5123 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume()
5158 static void ring_buffer_update_view(struct ring_buffer_per_cpu *cpu_buffer) in ring_buffer_update_view()
5212 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poke()
5249 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare()
5306 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start()
5332 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5360 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5400 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu()
5447 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer()
5473 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5506 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus()
5547 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset()
5582 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty()
5612 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu()
5646 struct ring_buffer_per_cpu *cpu_buffer_a; in ring_buffer_swap_cpu()
5647 struct ring_buffer_per_cpu *cpu_buffer_b; in ring_buffer_swap_cpu()
5737 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page()
5784 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page()
5850 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6072 struct ring_buffer_per_cpu *rb = trace_buffer->buffers[cpu]; in trace_buffer_pack_size()
6095 struct ring_buffer_per_cpu *rb; in trace_buffer_pack()
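
Read together, the references above sketch the design of this structure: a top-level struct trace_buffer owns one instance per CPU (the buffers array dereferenced in ring_buffer_event_time_stamp() and full_hit()), writers append through a tail/commit page pair (rb_tail_page_update(), rb_set_commit_to_write()), and readers pull events by swapping a dedicated reader page out of the ring (rb_swap_reader_page(), rb_get_reader_page()). The sketch below is a minimal, compilable illustration of that layout; the field subset and comments are assumptions drawn from the upstream kernel/trace/ring_buffer.c definition, not a verbatim copy of the struct at line 458.

/*
 * Illustrative subset of struct ring_buffer_per_cpu. The real kernel
 * definition carries many more members (locks, irq_work for waiters,
 * timestamp state, resize bookkeeping); only the circular-list core
 * implied by the reference listing is shown here.
 */
struct buffer_page;			/* one page of events in the ring */
struct trace_buffer;			/* top-level owner, one instance per CPU */

struct ring_buffer_per_cpu {
	int			cpu;		/* CPU this buffer serves */
	struct trace_buffer	*buffer;	/* back-pointer to the owner */
	unsigned long		nr_pages;	/* pages in the circular list */
	struct buffer_page	*head_page;	/* oldest unread events */
	struct buffer_page	*tail_page;	/* where new writes land */
	struct buffer_page	*commit_page;	/* last fully committed write */
	struct buffer_page	*reader_page;	/* spare page swapped in for reads */
	unsigned long		entries;	/* events written */
	unsigned long		overrun;	/* events lost to overwrite */
};

The reader_page is the key design choice visible in the listing: rb_get_reader_page() swaps a spare page into the ring in exchange for the head page rather than reading in place, so a consumer never contends with writers on the same page.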