Lines matching references to buffer (kernel/trace/ring_buffer.c)
279 #define for_each_buffer_cpu(buffer, cpu) \ argument
280 for_each_cpu(cpu, buffer->cpumask)
282 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
283 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
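These two macros drive every per-CPU loop further down the listing. A minimal sketch of the pattern follows; it only makes sense inside ring_buffer.c itself, since the macros, buffer->buffers and struct ring_buffer_per_cpu are private to that file, and the function name is illustrative:

/* Sketch only: iterate per-CPU buffers the way ring_buffer.c does internally. */
static void rb_iterate_example(struct trace_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;

        /* Every CPU that owns a per-CPU buffer (buffer->cpumask). */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                /* ... operate on cpu_buffer ... */
        }

        /* Only CPUs that are in buffer->cpumask and currently online. */
        for_each_online_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                /* ... */
        }
}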
498 struct trace_buffer *buffer; member
767 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
769 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
779 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
785 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
786 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
787 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
803 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
805 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
818 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
848 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
856 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
857 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
859 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
861 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
883 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
897 work = &buffer->irq_work; in ring_buffer_wait()
901 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
903 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
945 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
949 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
959 done = !pagebusy && full_hit(buffer, cpu, full); in ring_buffer_wait()
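ring_buffer_wait() above is the blocking read-side entry point: full is the fill percentage to wait for (0 means wake on any data) and cpu may be RING_BUFFER_ALL_CPUS. A hedged caller-side sketch, with an illustrative wrapper name:

#include <linux/ring_buffer.h>

/* Sketch: block until CPU 0 has any data, or a signal arrives. */
static int rb_wait_example(struct trace_buffer *buffer)
{
        int ret;

        ret = ring_buffer_wait(buffer, 0, 0);
        if (ret)
                return ret;     /* typically -EINTR */

        /* Data is now available on CPU 0; go consume it. */
        return 0;
}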
1000 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1007 work = &buffer->irq_work; in ring_buffer_poll_wait()
1010 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1013 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1044 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
1046 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1047 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
1060 atomic_inc(&__b->buffer->record_disabled); \
1071 static inline u64 rb_time_stamp(struct trace_buffer *buffer) in rb_time_stamp() argument
1076 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1079 ts = buffer->clock(); in rb_time_stamp()
1085 u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu) in ring_buffer_time_stamp() argument
1090 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
1097 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
1598 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1611 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1613 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1697 struct trace_buffer *buffer; in __ring_buffer_alloc() local
1704 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in __ring_buffer_alloc()
1706 if (!buffer) in __ring_buffer_alloc()
1709 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1713 buffer->flags = flags; in __ring_buffer_alloc()
1714 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1715 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1717 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1718 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1724 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1727 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1729 if (!buffer->buffers) in __ring_buffer_alloc()
1733 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1734 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1735 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1738 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1742 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1744 return buffer; in __ring_buffer_alloc()
1747 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1748 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1749 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1751 kfree(buffer->buffers); in __ring_buffer_alloc()
1754 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1757 kfree(buffer); in __ring_buffer_alloc()
1767 ring_buffer_free(struct trace_buffer *buffer) in ring_buffer_free() argument
1771 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1773 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1775 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1776 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1778 kfree(buffer->buffers); in ring_buffer_free()
1779 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1781 kfree(buffer); in ring_buffer_free()
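The allocation and teardown entry points above pair up as in the following sketch; the overwrite flag mirrors the self-test near the end of the listing, the size is arbitrary, and the function and variable names are illustrative:

#include <linux/ring_buffer.h>

static struct trace_buffer *example_buffer;

static int rb_lifetime_example(void)
{
        /* Roughly a page of event space per CPU; overwrite oldest data when full. */
        example_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
        if (!example_buffer)
                return -ENOMEM;

        /* ... produce and consume events ... */

        ring_buffer_free(example_buffer);
        example_buffer = NULL;
        return 0;
}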
1785 void ring_buffer_set_clock(struct trace_buffer *buffer, in ring_buffer_set_clock() argument
1788 buffer->clock = clock; in ring_buffer_set_clock()
1791 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) in ring_buffer_set_time_stamp_abs() argument
1793 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1796 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) in ring_buffer_time_stamp_abs() argument
1798 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
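ring_buffer_set_clock() and the time_stamp_abs pair above control how events are stamped. A small sketch, noting that trace_clock_local is already the default (line 1714 above):

#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>

static void rb_clock_example(struct trace_buffer *buffer)
{
        /* Explicitly select the lockless local clock (also the default). */
        ring_buffer_set_clock(buffer, trace_clock_local);

        /* Record absolute timestamps in events instead of deltas. */
        ring_buffer_set_time_stamp_abs(buffer, true);
        WARN_ON(!ring_buffer_time_stamp_abs(buffer));
}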
2031 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
2041 if (!buffer) in ring_buffer_resize()
2046 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2058 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2059 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2067 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2068 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2076 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2077 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2107 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2108 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2123 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2124 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2136 if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2139 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2188 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2189 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2197 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2198 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2201 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2204 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2205 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2209 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2212 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2225 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2226 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
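ring_buffer_resize() above takes a size in bytes and either a single CPU id or RING_BUFFER_ALL_CPUS; a hedged sketch (the 64 KiB figure is arbitrary):

#include <linux/ring_buffer.h>

static int rb_resize_example(struct trace_buffer *buffer)
{
        /* Resize every per-CPU buffer; returns 0 or a negative errno. */
        return ring_buffer_resize(buffer, 64 * 1024, RING_BUFFER_ALL_CPUS);
}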
2231 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
2233 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2235 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2237 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2238 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2603 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
2646 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
3073 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3075 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3076 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3078 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3098 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3222 void ring_buffer_nest_start(struct trace_buffer *buffer) in ring_buffer_nest_start() argument
3230 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3242 void ring_buffer_nest_end(struct trace_buffer *buffer) in ring_buffer_nest_end() argument
3249 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3264 int ring_buffer_unlock_commit(struct trace_buffer *buffer, in ring_buffer_unlock_commit() argument
3270 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3274 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3302 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3386 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3438 rb_reserve_next_event(struct trace_buffer *buffer, in rb_reserve_next_event() argument
3464 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3473 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3529 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
3538 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3543 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3546 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3557 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
3630 void ring_buffer_discard_commit(struct trace_buffer *buffer, in ring_buffer_discard_commit() argument
3640 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3647 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3676 int ring_buffer_write(struct trace_buffer *buffer, in ring_buffer_write() argument
3688 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3693 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3696 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3707 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3717 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
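The reserve/commit and ring_buffer_write() lines above are the two producer paths: reserve-then-commit writes the payload in place, ring_buffer_write() copies a finished payload. A sketch using the call signatures visible in the listing; the payload struct and function names are hypothetical:

#include <linux/ring_buffer.h>

struct example_payload {        /* hypothetical event payload */
        u64 seq;
        int value;
};

static int rb_produce_example(struct trace_buffer *buffer, u64 seq, int value)
{
        struct ring_buffer_event *event;
        struct example_payload *p, tmp;

        /* Path 1: reserve space, fill it in place, then commit. */
        event = ring_buffer_lock_reserve(buffer, sizeof(*p));
        if (!event)
                return -EBUSY;  /* recording disabled, or full in non-overwrite mode */
        p = ring_buffer_event_data(event);
        p->seq = seq;
        p->value = value;
        ring_buffer_unlock_commit(buffer, event);

        /* Path 2: copy an already-built payload in a single call. */
        tmp.seq = seq + 1;
        tmp.value = value;
        return ring_buffer_write(buffer, sizeof(tmp), &tmp);
}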
3776 void ring_buffer_record_disable(struct trace_buffer *buffer) in ring_buffer_record_disable() argument
3778 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3789 void ring_buffer_record_enable(struct trace_buffer *buffer) in ring_buffer_record_enable() argument
3791 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
3806 void ring_buffer_record_off(struct trace_buffer *buffer) in ring_buffer_record_off() argument
3812 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
3814 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
3829 void ring_buffer_record_on(struct trace_buffer *buffer) in ring_buffer_record_on() argument
3835 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
3837 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
3847 bool ring_buffer_record_is_on(struct trace_buffer *buffer) in ring_buffer_record_is_on() argument
3849 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
3863 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) in ring_buffer_record_is_set_on() argument
3865 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
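Two on/off mechanisms appear above: record_disable()/record_enable() form a nestable counter, while record_off()/record_on() flip a sticky RB_BUFFER_OFF bit that record_is_on()/record_is_set_on() test. A short illustrative sketch:

#include <linux/ring_buffer.h>

static void rb_onoff_example(struct trace_buffer *buffer)
{
        /* Nestable: every disable must be paired with an enable. */
        ring_buffer_record_disable(buffer);
        /* ... inspect or reset buffer state without racing writers ... */
        ring_buffer_record_enable(buffer);

        /* Sticky: writes stay rejected until record_on() is called. */
        ring_buffer_record_off(buffer);
        if (!ring_buffer_record_is_on(buffer))
                ring_buffer_record_on(buffer);
}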
3878 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
3882 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
3885 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3898 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
3902 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
3905 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
3928 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
3935 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
3938 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3961 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
3966 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
3969 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3981 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
3985 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
3988 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4000 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4005 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4008 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4023 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4028 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4031 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4045 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4050 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4053 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4066 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4070 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4073 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4085 unsigned long ring_buffer_entries(struct trace_buffer *buffer) in ring_buffer_entries() argument
4092 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4093 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4108 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) in ring_buffer_overruns() argument
4115 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4116 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4550 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4560 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4578 struct trace_buffer *buffer; in rb_iter_peek() local
4587 buffer = cpu_buffer->buffer; in rb_iter_peek()
4642 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4652 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
4708 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4711 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4716 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4786 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
4798 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
4801 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
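ring_buffer_peek() and ring_buffer_consume() above are the event-at-a-time read side; consume removes the event it returns. A hedged drain loop patterned on the self-test at the bottom of the listing (names are illustrative):

#include <linux/ring_buffer.h>

static unsigned long rb_drain_cpu_example(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost = 0, count = 0;
        u64 ts;

        /* Each successful call removes one event from this CPU's buffer. */
        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                void *data = ring_buffer_event_data(event);

                (void)data;     /* ... decode the payload here ... */
                count++;
        }
        return count;
}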
4846 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
4851 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
4865 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4973 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
4981 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
4984 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5067 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5069 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5071 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5075 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5088 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5100 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) in ring_buffer_reset_online_cpus() argument
5106 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5108 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5109 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5118 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5119 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5134 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5141 void ring_buffer_reset(struct trace_buffer *buffer) in ring_buffer_reset() argument
5147 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5149 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5159 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5160 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5168 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5176 bool ring_buffer_empty(struct trace_buffer *buffer) in ring_buffer_empty() argument
5185 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5186 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5206 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5213 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5298 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5299 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5328 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5335 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5338 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5375 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5382 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5385 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5440 int ring_buffer_read_page(struct trace_buffer *buffer, in ring_buffer_read_page() argument
5443 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5454 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
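alloc_read_page/read_page/free_read_page above form the page-at-a-time (splice) read path. A sketch under the assumption that ring_buffer_alloc_read_page() uses the ERR_PTR return convention and that ring_buffer_read_page() takes (buffer, &page, len, cpu, full), as in this kernel generation:

#include <linux/err.h>
#include <linux/ring_buffer.h>

static int rb_read_page_example(struct trace_buffer *buffer, int cpu)
{
        void *page;
        int ret;

        page = ring_buffer_alloc_read_page(buffer, cpu);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* full == 1: only swap the page out once it holds a full page of events. */
        ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1);
        if (ret >= 0) {
                /* page now holds a buffer page of events; ret is the data offset */
        }

        ring_buffer_free_read_page(buffer, cpu, page);
        return ret < 0 ? ret : 0;
}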
5619 struct trace_buffer *buffer; in trace_rb_cpu_prepare() local
5624 buffer = container_of(node, struct trace_buffer, node); in trace_rb_cpu_prepare()
5625 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5631 for_each_buffer_cpu(buffer, cpu_i) { in trace_rb_cpu_prepare()
5634 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
5635 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
5643 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5644 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
5645 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5651 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
5674 struct trace_buffer *buffer; member
5730 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
5744 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
5770 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
5816 struct trace_buffer *buffer; in test_ringbuffer() local
5827 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
5828 if (WARN_ON(!buffer)) in test_ringbuffer()
5832 ring_buffer_record_off(buffer); in test_ringbuffer()
5835 rb_data[cpu].buffer = buffer; in test_ringbuffer()
5858 ring_buffer_record_on(buffer); in test_ringbuffer()
5884 ring_buffer_free(buffer); in test_ringbuffer()
5924 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
5929 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
5938 RB_WARN_ON(buffer, 1); in test_ringbuffer()
5958 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
5962 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
5970 ring_buffer_free(buffer); in test_ringbuffer()