Lines matching references to: buffer
272 #define for_each_buffer_cpu(buffer, cpu) \ argument
273 for_each_cpu(cpu, buffer->cpumask)
275 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
276 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
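These two macros iterate the per-CPU buffers that back a trace_buffer, either over every allocated CPU or only over CPUs currently online. A minimal sketch of the usual walk, mirroring the pattern ring_buffer_entries() follows at 4293 below; rb_num_of_entries() is a per-CPU helper assumed to exist in this file:

    /* Sketch: sum entries across all per-CPU buffers (assumed helper noted above). */
    static unsigned long count_entries_sketch(struct trace_buffer *buffer)
    {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long entries = 0;
        int cpu;

        for_each_buffer_cpu(buffer, cpu) {
            cpu_buffer = buffer->buffers[cpu];
            entries += rb_num_of_entries(cpu_buffer);
        }
        return entries;
    }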
462 struct trace_buffer *buffer; member
684 static inline bool has_ext_writer(struct trace_buffer *buffer) in has_ext_writer() argument
686 return !!buffer->ext_cb; in has_ext_writer()
691 return has_ext_writer(cpu_buffer->buffer); in rb_has_ext_writer()
754 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
773 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, in ring_buffer_event_time_stamp() argument
776 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
802 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
814 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
816 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
826 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
832 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
833 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
834 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
850 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
852 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
865 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
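full_hit() decides whether a per-CPU buffer has reached a caller-supplied fill percentage; ring_buffer_nr_dirty_pages() supplies the count of pages written but not yet read. A sketch of how the threshold test plausibly completes around the two lines shown above (the function name here is illustrative; "full" is a percentage from 0 to 100):

    static bool full_hit_sketch(struct trace_buffer *buffer, int cpu, int full)
    {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        size_t nr_pages = cpu_buffer->nr_pages;
        size_t dirty;

        /* No pages or no threshold requested: treat as already "full enough". */
        if (!nr_pages || !full)
            return true;

        /* +1 accounts for the page the writer is currently filling. */
        dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;

        /* True once dirty pages exceed "full" percent of the buffer. */
        return (dirty * 100) > (full * nr_pages);
    }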
895 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
900 if (!buffer) in ring_buffer_wake_waiters()
906 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
907 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
909 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
911 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
916 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
941 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
955 work = &buffer->irq_work; in ring_buffer_wait()
959 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1003 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
1007 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
1017 done = !pagebusy && full_hit(buffer, cpu, full); in ring_buffer_wait()
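ring_buffer_wait() is the blocking entry point for readers; its third argument is the same fill percentage that full_hit() tests. A minimal caller sketch:

    /* Block until this CPU's buffer has data; 0 means wake on any data,
     * a non-zero value waits for that fill percentage. */
    ret = ring_buffer_wait(buffer, cpu, 0);
    if (ret < 0)
        return ret;    /* e.g. interrupted by a signal */
    /* ...then drain with ring_buffer_consume(); see the sketch near 5135 below. */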
1058 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1065 work = &buffer->irq_work; in ring_buffer_poll_wait()
1068 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1071 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1102 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
1104 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1105 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
1118 atomic_inc(&__b->buffer->record_disabled); \
1129 static inline u64 rb_time_stamp(struct trace_buffer *buffer) in rb_time_stamp() argument
1134 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1137 ts = buffer->clock(); in rb_time_stamp()
1143 u64 ring_buffer_time_stamp(struct trace_buffer *buffer) in ring_buffer_time_stamp() argument
1148 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
1155 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
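ring_buffer_time_stamp() reads the buffer's configured clock, and ring_buffer_normalize_time_stamp() converts such a raw value into the time base used when printing (a no-op for the default local clock). A two-line usage sketch, assuming a valid cpu:

    u64 ts;

    ts = ring_buffer_time_stamp(buffer);                 /* raw value from buffer->clock() */
    ring_buffer_normalize_time_stamp(buffer, cpu, &ts);  /* adjust to a printable time base */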
1656 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1669 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1671 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1755 struct trace_buffer *buffer; in __ring_buffer_alloc() local
1762 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in __ring_buffer_alloc()
1764 if (!buffer) in __ring_buffer_alloc()
1767 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1771 buffer->flags = flags; in __ring_buffer_alloc()
1772 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1773 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1775 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1776 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1782 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1785 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1787 if (!buffer->buffers) in __ring_buffer_alloc()
1791 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1792 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1793 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1796 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1800 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1802 return buffer; in __ring_buffer_alloc()
1805 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1806 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1807 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1809 kfree(buffer->buffers); in __ring_buffer_alloc()
1812 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1815 kfree(buffer); in __ring_buffer_alloc()
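__ring_buffer_alloc() is normally reached through the ring_buffer_alloc() wrapper macro, which supplies the lockdep key stored at 1773. A minimal lifecycle sketch (sizes and flags are illustrative):

    struct trace_buffer *buf;

    /* 1 MiB per CPU, overwriting the oldest data when full. */
    buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
    if (!buf)
        return -ENOMEM;

    /* ... produce and consume events ... */

    ring_buffer_free(buf);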
1823 struct trace_buffer *buffer; in ring_buffer_alloc_ext() local
1828 buffer = ring_buffer_alloc(size, RB_FL_OVERWRITE); in ring_buffer_alloc_ext()
1829 if (!buffer) in ring_buffer_alloc_ext()
1833 &buffer->node)); in ring_buffer_alloc_ext()
1834 buffer->ext_cb = cb; in ring_buffer_alloc_ext()
1835 atomic_set(&buffer->record_disabled, 1); in ring_buffer_alloc_ext()
1837 return buffer; in ring_buffer_alloc_ext()
1845 ring_buffer_free(struct trace_buffer *buffer) in ring_buffer_free() argument
1849 if (!has_ext_writer(buffer)) in ring_buffer_free()
1851 &buffer->node); in ring_buffer_free()
1853 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1855 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1856 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1858 kfree(buffer->buffers); in ring_buffer_free()
1859 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1861 kfree(buffer); in ring_buffer_free()
1865 void ring_buffer_set_clock(struct trace_buffer *buffer, in ring_buffer_set_clock() argument
1868 buffer->clock = clock; in ring_buffer_set_clock()
1871 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) in ring_buffer_set_time_stamp_abs() argument
1873 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1876 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) in ring_buffer_time_stamp_abs() argument
1878 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
2115 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
2122 if (unlikely(has_ext_writer(buffer))) in ring_buffer_resize()
2127 if (!buffer) in ring_buffer_resize()
2132 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2142 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2143 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2151 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2152 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2160 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2161 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2191 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2192 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2207 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2208 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2219 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2268 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2269 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2277 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2278 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2281 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2284 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2285 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2289 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2292 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2305 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2306 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2311 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
2313 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2315 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2317 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2318 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
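ring_buffer_resize() takes a byte size and either one CPU id or RING_BUFFER_ALL_CPUS, and ring_buffer_change_overwrite() toggles the RB_FL_OVERWRITE behaviour set at allocation time. A caller sketch, assuming the buffer was allocated as above:

    int ret;

    /* Size is per CPU in bytes, rounded to whole pages internally;
     * RING_BUFFER_ALL_CPUS resizes every per-CPU buffer. */
    ret = ring_buffer_resize(buffer, 2 * 1024 * 1024, RING_BUFFER_ALL_CPUS);
    if (ret < 0)
        return ret;

    /* Switch writers from overwrite mode to "drop new events when full". */
    ring_buffer_change_overwrite(buffer, 0);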
2683 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
2726 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
3122 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3124 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3125 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3127 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3147 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3275 void ring_buffer_nest_start(struct trace_buffer *buffer) in ring_buffer_nest_start() argument
3283 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3295 void ring_buffer_nest_end(struct trace_buffer *buffer) in ring_buffer_nest_end() argument
3302 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3317 int ring_buffer_unlock_commit(struct trace_buffer *buffer, in ring_buffer_unlock_commit() argument
3323 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3327 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3506 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3577 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3637 rb_reserve_next_event(struct trace_buffer *buffer, in rb_reserve_next_event() argument
3663 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3672 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3728 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
3737 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3742 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3745 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3756 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
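The usual writer path is reserve, fill, commit; ring_buffer_discard_commit() just below lets a writer back out of a reservation instead. A sketch, where struct my_payload and the error value are illustrative, not from this file:

    struct my_payload { u64 ts; u32 value; };

    static int write_one_event_sketch(struct trace_buffer *buffer, u32 value)
    {
        struct ring_buffer_event *event;
        struct my_payload *body;

        /* Reserve space; preemption stays disabled until the commit. */
        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
            return -EBUSY;    /* recording disabled, or no room with overwrite off */

        body = ring_buffer_event_data(event);
        body->ts = ring_buffer_time_stamp(buffer);
        body->value = value;

        /* ring_buffer_discard_commit(buffer, event) would drop the event instead. */
        return ring_buffer_unlock_commit(buffer, event);
    }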
3829 void ring_buffer_discard_commit(struct trace_buffer *buffer, in ring_buffer_discard_commit() argument
3835 if (unlikely(has_ext_writer(buffer))) in ring_buffer_discard_commit()
3842 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3849 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3878 int ring_buffer_write(struct trace_buffer *buffer, in ring_buffer_write() argument
3890 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3895 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3898 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3909 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3919 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
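ring_buffer_write() wraps the reserve/copy/commit sequence for callers that already hold the payload in memory. A short sketch (struct my_event is illustrative):

    struct my_event ev = { .value = 42 };

    if (ring_buffer_write(buffer, sizeof(ev), &ev))
        pr_warn("ring buffer write dropped\n");   /* disabled, CPU not in cpumask, or no space */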
3978 void ring_buffer_record_disable(struct trace_buffer *buffer) in ring_buffer_record_disable() argument
3980 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3991 void ring_buffer_record_enable(struct trace_buffer *buffer) in ring_buffer_record_enable() argument
3993 if (unlikely(has_ext_writer(buffer))) in ring_buffer_record_enable()
3996 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4011 void ring_buffer_record_off(struct trace_buffer *buffer) in ring_buffer_record_off() argument
4017 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4019 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
4034 void ring_buffer_record_on(struct trace_buffer *buffer) in ring_buffer_record_on() argument
4039 if (unlikely(has_ext_writer(buffer))) in ring_buffer_record_on()
4043 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4045 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
4055 bool ring_buffer_record_is_on(struct trace_buffer *buffer) in ring_buffer_record_is_on() argument
4057 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4071 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) in ring_buffer_record_is_set_on() argument
4073 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
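ring_buffer_record_disable()/enable() nest via the record_disabled counter, while ring_buffer_record_off()/on() set and clear the permanent RB_BUFFER_OFF bit checked at 4073, which enable/disable alone cannot undo. A sketch of the nesting use; the synchronize_rcu() follows the convention the kernel comments for this API suggest, so in-flight writers finish before the buffer is inspected:

    /* Temporarily stop all writers, e.g. around a dump or page swap. */
    ring_buffer_record_disable(buffer);
    synchronize_rcu();

    /* ... read or swap pages while nothing new is written ... */

    ring_buffer_record_enable(buffer);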
4086 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4090 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4093 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4106 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4110 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4113 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4136 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4143 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4146 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4169 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4174 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4189 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4193 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4196 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4208 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4213 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4231 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4236 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4239 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4253 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4258 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4261 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4274 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4278 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4281 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4293 unsigned long ring_buffer_entries(struct trace_buffer *buffer) in ring_buffer_entries() argument
4300 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4301 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4316 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) in ring_buffer_overruns() argument
4323 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4324 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4541 if (cpu_buffer->buffer->ext_cb->swap_reader(cpu_buffer->cpu)) { in rb_swap_reader_page_ext()
4884 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4894 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4912 struct trace_buffer *buffer; in rb_iter_peek() local
4921 buffer = cpu_buffer->buffer; in rb_iter_peek()
4976 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4986 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
5042 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
5045 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5050 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5120 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5132 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5135 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
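A consuming reader drains events CPU by CPU with ring_buffer_consume(); the lost-events count reports drops since the previous read. A sketch where handle_event() is an illustrative consumer, not from this file:

    static void drain_all_sketch(struct trace_buffer *buffer)
    {
        struct ring_buffer_event *event;
        unsigned long lost;
        u64 ts;
        int cpu;

        for_each_online_cpu(cpu) {
            while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                if (lost)
                    pr_info("cpu%d: %lu events lost\n", cpu, lost);
                handle_event(ring_buffer_event_data(event),
                             ring_buffer_event_length(event));
            }
        }
    }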
5169 cpu_buffer->buffer->ext_cb->update_footers(cpu_buffer->cpu); in ring_buffer_update_view()
5210 int ring_buffer_poke(struct trace_buffer *buffer, int cpu) in ring_buffer_poke() argument
5214 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poke()
5217 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poke()
5220 rb_wakeups(buffer, cpu_buffer); in ring_buffer_poke()
5247 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
5252 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5266 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5376 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5384 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5387 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5471 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5473 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5475 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5479 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5492 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5504 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) in ring_buffer_reset_online_cpus() argument
5510 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5512 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5513 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5522 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5523 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5538 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5545 void ring_buffer_reset(struct trace_buffer *buffer) in ring_buffer_reset() argument
5551 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5553 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5554 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5563 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5564 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5572 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5580 bool ring_buffer_empty(struct trace_buffer *buffer) in ring_buffer_empty() argument
5589 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5590 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5610 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5617 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5620 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5705 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5706 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5735 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5742 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5745 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5782 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5789 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5792 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5847 int ring_buffer_read_page(struct trace_buffer *buffer, in ring_buffer_read_page() argument
5850 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5861 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5903 unlikely(has_ext_writer(buffer))) { in ring_buffer_read_page()
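The page-granular read path hands a whole sub-buffer to the caller and is what the tracing splice/binary read builds on; the alloc, read, and free calls above pair up. A sketch, assuming the ERR_PTR return convention of recent kernels for the allocation; consume_page_sketch() is illustrative:

    static int read_one_page_sketch(struct trace_buffer *buffer, int cpu)
    {
        void *page;
        int ret;

        page = ring_buffer_alloc_read_page(buffer, cpu);
        if (IS_ERR(page))
            return PTR_ERR(page);

        /* full == 0: hand back whatever is there, even a partly filled page. */
        ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
        if (ret >= 0)
            consume_page_sketch(page, ret);   /* ret < 0 means nothing was read */

        ring_buffer_free_read_page(buffer, cpu, page);
        return ret < 0 ? ret : 0;
    }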
6027 struct trace_buffer *buffer; in trace_rb_cpu_prepare() local
6032 buffer = container_of(node, struct trace_buffer, node); in trace_rb_cpu_prepare()
6033 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
6039 for_each_buffer_cpu(buffer, cpu_i) { in trace_rb_cpu_prepare()
6042 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
6043 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
6051 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
6052 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
6053 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
6059 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
6150 struct trace_buffer *buffer; member
6206 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
6220 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
6246 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
6292 struct trace_buffer *buffer; in test_ringbuffer() local
6303 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
6304 if (WARN_ON(!buffer)) in test_ringbuffer()
6308 ring_buffer_record_off(buffer); in test_ringbuffer()
6311 rb_data[cpu].buffer = buffer; in test_ringbuffer()
6331 ring_buffer_record_on(buffer); in test_ringbuffer()
6357 ring_buffer_free(buffer); in test_ringbuffer()
6397 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
6402 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
6411 RB_WARN_ON(buffer, 1); in test_ringbuffer()
6431 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
6435 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
6443 ring_buffer_free(buffer); in test_ringbuffer()