Lines matching definitions of symbol: cpu_buffer
531 struct ring_buffer_per_cpu *cpu_buffer; member
592 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
619 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
666 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
727 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
761 struct ring_buffer_per_cpu *cpu_buffer = in rb_wake_up_waiters() local
787 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
819 struct ring_buffer_per_cpu *cpu_buffer; in rb_watermark_hit() local
918 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
977 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1229 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate()
1259 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate()
1270 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set()
1293 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update()
1302 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head()
1311 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal()
1328 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page()
1380 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update()
1441 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage()
1449 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_links()
1470 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages()
1610 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) in rb_range_buffer()
1780 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_meta_validate_events()
1944 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start() local
1969 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show() local
2016 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_meta_buffer_update()
2030 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages()
2155 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages()
2183 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
2310 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer()
2583 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages()
2693 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages()
2774 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages()
2790 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2809 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
3034 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event()
3106 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index()
3112 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) in rb_event_index()
3123 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
3151 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_head()
3168 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_reader()
3195 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page()
3352 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail()
3435 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail()
3539 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_time_stamp()
3568 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp()
3586 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp()
3639 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event()
3699 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard()
3764 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit()
3771 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write()
3831 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit()
3876 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit()
3883 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups()
3987 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock()
4014 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock()
4038 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
4058 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
4079 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
4245 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
4298 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
4306 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next()
4450 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event()
4547 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
4594 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry()
4649 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
4696 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
4754 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries()
4760 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty()
4878 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
4898 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
4916 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
4948 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
4968 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
4987 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
5010 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
5032 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
5053 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
5072 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
5095 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
5111 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
5140 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
5160 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
5203 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp()
5262 static bool rb_read_writer_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_read_writer_meta_page()
5277 __rb_get_reader_page_from_writer(struct ring_buffer_per_cpu *cpu_buffer) in __rb_get_reader_page_from_writer()
5318 __rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in __rb_get_reader_page()
5490 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page()
5496 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader()
5522 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
5549 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events()
5555 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek()
5632 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
5719 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock()
5744 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock()
5764 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
5812 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
5842 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
5893 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5935 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
5955 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
6003 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_meta_page()
6023 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu()
6096 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer()
6122 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
6160 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
6207 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
6242 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
6272 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
6293 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_writer() local
6447 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
6503 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
6576 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
6814 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_subbuf_order_set() local
6970 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_alloc_meta_page()
6986 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_meta_page()
6994 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_setup_ids_meta_page()
7031 struct ring_buffer_per_cpu *cpu_buffer; in rb_get_mapped_buffer() local
7048 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_put_mapped_buffer()
7057 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, in __rb_inc_dec_mapped()
7103 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma()
7200 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma()
7210 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map() local
7278 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unmap() local
7322 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_get_reader() local