
Lines Matching refs:buffer (kernel/trace/ring_buffer.c)

270 #define for_each_buffer_cpu(buffer, cpu)		\  argument
271 for_each_cpu(cpu, buffer->cpumask)
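
for_each_buffer_cpu() is the file's internal helper for walking every per-CPU sub-buffer that has actually been allocated; the allocation, free, resize and reset paths later in this listing all use it. A minimal sketch of the pattern, assuming the internal struct ring_buffer and struct ring_buffer_per_cpu definitions from this file (rb_num_of_entries() is assumed to be the usual per-CPU entry counter; it does not appear in this listing):

static unsigned long rb_entries_all_cpus(struct ring_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long entries = 0;
        int cpu;

        /* Only CPUs set in buffer->cpumask have a sub-buffer. */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                entries += rb_num_of_entries(cpu_buffer);
        }
        return entries;
}

This is essentially what ring_buffer_entries() further down in this listing does.
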
446 struct ring_buffer *buffer; member
521 size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
523 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
533 size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
538 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
539 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
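
ring_buffer_nr_pages() and ring_buffer_nr_dirty_pages() report how many pages one CPU's buffer holds and how many of them still carry unread data; the wait path below uses that ratio to decide when a partially full buffer should wake a reader. A small sketch of the same calculation (the helper name and the 0-100 scale are assumptions, not taken from this file):

#include <linux/ring_buffer.h>

/* Rough fill level of one CPU's buffer, as a percentage (sketch). */
static int rb_fill_percent(struct ring_buffer *buffer, int cpu)
{
        size_t nr_pages = ring_buffer_nr_pages(buffer, cpu);
        size_t dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

        if (!nr_pages)
                return 0;
        return (int)(dirty * 100 / nr_pages);
}
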
576 int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
589 work = &buffer->irq_work; in ring_buffer_wait()
593 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
595 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
636 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
640 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
652 dirty = ring_buffer_nr_dirty_pages(buffer, cpu); in ring_buffer_wait()
687 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
694 work = &buffer->irq_work; in ring_buffer_poll_wait()
696 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
699 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
720 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
721 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
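
ring_buffer_wait() blocks a reader until data is available on one CPU (or on any CPU when RING_BUFFER_ALL_CPUS is passed), and ring_buffer_poll_wait() is the corresponding poll()/epoll hook. In this version the third argument acts as a fill threshold: the dirty-page count computed above decides when a partially full buffer is worth waking for. A minimal sketch of a blocking reader, assuming the caller already owns a buffer:

/* Sketch: block until 'cpu' has data, then consume everything queued.
 * 0 means "wake on any data"; a non-zero value is treated as a fill
 * percentage in this version (an assumption based on the dirty-page
 * accounting shown above). */
static int drain_one_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        int ret;

        ret = ring_buffer_wait(buffer, cpu, 0);
        if (ret)
                return ret;     /* e.g. interrupted by a signal */

        while ((event = ring_buffer_consume(buffer, cpu, NULL, NULL)))
                ;               /* process ring_buffer_event_data(event) here */
        return 0;
}
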
734 atomic_inc(&__b->buffer->record_disabled); \
745 static inline u64 rb_time_stamp(struct ring_buffer *buffer) in rb_time_stamp() argument
748 return buffer->clock() << DEBUG_SHIFT; in rb_time_stamp()
751 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) in ring_buffer_time_stamp() argument
756 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
763 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
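
rb_time_stamp() applies a DEBUG_SHIFT to whatever clock the buffer uses; ring_buffer_time_stamp() exposes that value and ring_buffer_normalize_time_stamp() undoes the shift so the result is comparable with event timestamps. A short fragment sketch, assuming the usual three-argument (buffer, cpu, &ts) form of the normalize helper:

u64 ts = ring_buffer_time_stamp(buffer, cpu);

ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
/* ts is now in the same units as the timestamps reported by
 * ring_buffer_peek() and ring_buffer_consume(). */
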
1286 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1299 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1301 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1380 struct ring_buffer *buffer; in __ring_buffer_alloc() local
1387 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in __ring_buffer_alloc()
1389 if (!buffer) in __ring_buffer_alloc()
1392 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1396 buffer->flags = flags; in __ring_buffer_alloc()
1397 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1398 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1400 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1401 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1407 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1410 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1412 if (!buffer->buffers) in __ring_buffer_alloc()
1416 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1417 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1418 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1421 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1425 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1427 return buffer; in __ring_buffer_alloc()
1430 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1431 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1432 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1434 kfree(buffer->buffers); in __ring_buffer_alloc()
1437 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1440 kfree(buffer); in __ring_buffer_alloc()
1450 ring_buffer_free(struct ring_buffer *buffer) in ring_buffer_free() argument
1454 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1456 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1457 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1459 kfree(buffer->buffers); in ring_buffer_free()
1460 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1462 kfree(buffer); in ring_buffer_free()
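
__ring_buffer_alloc() builds the top-level descriptor, allocates a sub-buffer for the CPU it happens to run on, and registers with CPUHP_TRACE_RB_PREPARE so trace_rb_cpu_prepare() (near the end of this listing) can populate the remaining CPUs as they come up; ring_buffer_free() unregisters and tears everything down. Callers normally use the ring_buffer_alloc() wrapper macro, which supplies the lockdep key, exactly as the self test at the bottom of the file does. A minimal lifecycle sketch (the 1 MB size is an arbitrary assumption):

#include <linux/ring_buffer.h>

static struct ring_buffer *example_rb;

static int example_init(void)
{
        /* ~1 MB per CPU, oldest events overwritten when full (sketch). */
        example_rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
        if (!example_rb)
                return -ENOMEM;
        return 0;
}

static void example_exit(void)
{
        ring_buffer_free(example_rb);
}
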
1466 void ring_buffer_set_clock(struct ring_buffer *buffer, in ring_buffer_set_clock() argument
1469 buffer->clock = clock; in ring_buffer_set_clock()
1472 void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs) in ring_buffer_set_time_stamp_abs() argument
1474 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1477 bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer) in ring_buffer_time_stamp_abs() argument
1479 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
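
The allocator defaults the clock to trace_clock_local; ring_buffer_set_clock() swaps in any u64 (*)(void) source, and ring_buffer_set_time_stamp_abs() switches events from delta timestamps to absolute ones. A hedged fragment sketch using trace_clock_global, another clock exported by the tracing core:

#include <linux/trace_clock.h>

/* Use the globally ordered clock instead of the default per-CPU local
 * clock, and store absolute timestamps in every event (sketch). */
ring_buffer_set_clock(example_rb, trace_clock_global);
ring_buffer_set_time_stamp_abs(example_rb, true);
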
1715 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
1725 if (!buffer) in ring_buffer_resize()
1730 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
1746 if (atomic_read(&buffer->resize_disabled)) in ring_buffer_resize()
1750 mutex_lock(&buffer->mutex); in ring_buffer_resize()
1754 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1755 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1783 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1784 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1799 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1800 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1812 if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
1815 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
1854 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
1855 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
1863 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1864 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1867 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
1870 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
1874 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1877 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1889 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
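
ring_buffer_resize() changes the number of pages for one CPU or, with RING_BUFFER_ALL_CPUS, for every per-CPU buffer; it bails out while readers hold resize_disabled, and if recording is already disabled it bumps record_disabled again until the update completes. A short fragment sketch, assuming the size argument is in bytes as at allocation time:

/* Grow every per-CPU buffer to ~4 MB; returns 0 or a negative errno (sketch). */
int err = ring_buffer_resize(example_rb, 4 << 20, RING_BUFFER_ALL_CPUS);

if (err)
        pr_warn("ring buffer resize failed: %d\n", err);
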
1894 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
1896 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
1898 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
1900 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
1901 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2209 struct ring_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
2252 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2359 bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer); in rb_update_event()
2612 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
2618 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
2619 buffer->irq_work.waiters_pending = false; in rb_wakeups()
2621 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
2643 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu); in rb_wakeups()
2736 void ring_buffer_nest_start(struct ring_buffer *buffer) in ring_buffer_nest_start() argument
2744 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
2756 void ring_buffer_nest_end(struct ring_buffer *buffer) in ring_buffer_nest_end() argument
2763 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
2778 int ring_buffer_unlock_commit(struct ring_buffer *buffer, in ring_buffer_unlock_commit() argument
2784 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
2788 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
2843 if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer)) in __rb_reserve_next()
2871 rb_reserve_next_event(struct ring_buffer *buffer, in rb_reserve_next_event() argument
2890 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
2914 info.ts = rb_time_stamp(cpu_buffer->buffer); in rb_reserve_next_event()
2920 if (ring_buffer_time_stamp_abs(buffer)) { in rb_reserve_next_event()
2964 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
2973 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
2978 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
2981 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
2992 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
3065 void ring_buffer_discard_commit(struct ring_buffer *buffer, in ring_buffer_discard_commit() argument
3075 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3082 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3116 int ring_buffer_write(struct ring_buffer *buffer, in ring_buffer_write() argument
3128 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3133 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3136 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3147 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3157 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
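
The normal write path is a two-step reserve/commit: ring_buffer_lock_reserve() returns an event with room for the payload, the caller fills ring_buffer_event_data(), and ring_buffer_unlock_commit() publishes it (ring_buffer_discard_commit() throws it away instead). ring_buffer_write() is the one-shot wrapper that copies a caller-supplied buffer in directly. A minimal sketch of the reserve/commit form, with a hypothetical payload struct:

struct my_event {                       /* hypothetical payload */
        u64 val;
};

static int write_my_event(struct ring_buffer *buffer, u64 val)
{
        struct ring_buffer_event *event;
        struct my_event *entry;

        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return -EBUSY;          /* full, recursing, or recording disabled */

        entry = ring_buffer_event_data(event);
        entry->val = val;

        return ring_buffer_unlock_commit(buffer, event);
}

Preemption stays disabled from a successful reserve until the matching commit or discard, so the code between the two calls should stay short.
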
3196 void ring_buffer_record_disable(struct ring_buffer *buffer) in ring_buffer_record_disable() argument
3198 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3209 void ring_buffer_record_enable(struct ring_buffer *buffer) in ring_buffer_record_enable() argument
3211 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
3226 void ring_buffer_record_off(struct ring_buffer *buffer) in ring_buffer_record_off() argument
3232 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
3234 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
3249 void ring_buffer_record_on(struct ring_buffer *buffer) in ring_buffer_record_on() argument
3255 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
3257 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
3267 bool ring_buffer_record_is_on(struct ring_buffer *buffer) in ring_buffer_record_is_on() argument
3269 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
3283 bool ring_buffer_record_is_set_on(struct ring_buffer *buffer) in ring_buffer_record_is_set_on() argument
3285 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
3298 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
3302 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
3305 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3318 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
3322 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
3325 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
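
record_disabled is a counter: ring_buffer_record_disable()/enable() nest, ring_buffer_record_off()/on() set and clear the dedicated RB_BUFFER_OFF bit so a permanent "off" survives nested disables, and the _cpu variants act on a single per-CPU buffer. A fragment sketch of pausing all writers around a section that must not generate events (the synchronize_rcu() call to let in-flight writers finish is an assumption about the caller's needs):

/* Stop all writers, let in-flight writes drain, inspect, resume (sketch). */
ring_buffer_record_disable(example_rb);
synchronize_rcu();

pr_info("entries=%lu overruns=%lu\n",
        ring_buffer_entries(example_rb), ring_buffer_overruns(example_rb));

ring_buffer_record_enable(example_rb);
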
3348 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
3355 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
3358 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3381 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
3386 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
3389 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3401 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
3405 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
3408 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3420 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
3425 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
3428 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
3443 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
3448 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
3451 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
3465 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
3470 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
3473 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
3486 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
3490 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
3493 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
3505 unsigned long ring_buffer_entries(struct ring_buffer *buffer) in ring_buffer_entries() argument
3512 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
3513 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
3528 unsigned long ring_buffer_overruns(struct ring_buffer *buffer) in ring_buffer_overruns() argument
3535 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
3536 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
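
The per-CPU counters (entries, overruns, commit overruns, dropped events, read events, bytes) and the whole-buffer sums ring_buffer_entries()/ring_buffer_overruns() are read-only statistics. A short fragment sketch that prints a per-CPU summary, iterating with for_each_online_cpu():

int cpu;

for_each_online_cpu(cpu) {
        pr_info("cpu%d: %lu entries, %lu overruns, %lu bytes\n",
                cpu,
                ring_buffer_entries_cpu(example_rb, cpu),
                ring_buffer_overrun_cpu(example_rb, cpu),
                ring_buffer_bytes_cpu(example_rb, cpu));
}
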
3924 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3934 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3952 struct ring_buffer *buffer; in rb_iter_peek() local
3961 buffer = cpu_buffer->buffer; in rb_iter_peek()
4014 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4024 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
4080 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4083 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4088 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4144 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
4156 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
4159 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
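
ring_buffer_peek() returns the next event on a CPU without consuming it, while ring_buffer_consume() also advances the reader; both can report the event timestamp and the number of events lost to overwrite. A consumer-loop sketch in the same style as the self test at the end of this file (ring_buffer_event_data() and ring_buffer_event_length() are the standard accessors from linux/ring_buffer.h):

static void dump_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                void *data = ring_buffer_event_data(event);
                unsigned int len = ring_buffer_event_length(event);

                if (lost)
                        pr_warn("cpu%d: %lu events lost\n", cpu, lost);
                pr_info("cpu%d: ts=%llu len=%u data=%p\n", cpu, ts, len, data);
        }
}
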
4204 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
4209 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
4216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4220 atomic_inc(&buffer->resize_disabled); in ring_buffer_read_prepare()
4295 atomic_dec(&cpu_buffer->buffer->resize_disabled); in ring_buffer_read_finish()
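
ring_buffer_read_prepare() is the non-consuming path: it allocates an iterator and bumps resize_disabled so the buffer cannot be resized underneath the reader; ring_buffer_read_finish() drops that again, as the line above shows. A hedged sketch of the whole iterator sequence; the _prepare_sync/_start/_read companions are the usual helpers from linux/ring_buffer.h and are an assumption insofar as they do not appear in this listing:

static int walk_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
        if (!iter)
                return -ENOMEM;
        ring_buffer_read_prepare_sync();
        ring_buffer_read_start(iter);

        while ((event = ring_buffer_read(iter, &ts)))
                ;       /* inspect events without consuming them from the buffer */

        ring_buffer_read_finish(iter);
        return 0;
}
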
4335 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) in ring_buffer_size() argument
4343 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
4346 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
4401 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
4403 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
4406 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
4409 atomic_inc(&buffer->resize_disabled); in ring_buffer_reset_cpu()
4430 atomic_dec(&buffer->resize_disabled); in ring_buffer_reset_cpu()
4438 void ring_buffer_reset(struct ring_buffer *buffer) in ring_buffer_reset() argument
4442 for_each_buffer_cpu(buffer, cpu) in ring_buffer_reset()
4443 ring_buffer_reset_cpu(buffer, cpu); in ring_buffer_reset()
4451 bool ring_buffer_empty(struct ring_buffer *buffer) in ring_buffer_empty() argument
4460 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
4461 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
4481 bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
4488 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
4491 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
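
ring_buffer_reset_cpu() and ring_buffer_reset() discard everything in a buffer (holding resize_disabled while they work), and ring_buffer_empty()/ring_buffer_empty_cpu() report whether anything is left to read. A one-line fragment sketch combining them:

/* Throw away whatever has not been consumed yet (sketch). */
if (!ring_buffer_empty(example_rb))
        ring_buffer_reset(example_rb);
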
4563 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
4564 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
4593 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
4600 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
4603 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
4640 void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
4642 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
4700 int ring_buffer_read_page(struct ring_buffer *buffer, in ring_buffer_read_page() argument
4703 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
4714 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
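
The page-based read path is what splice-style readers use: ring_buffer_alloc_read_page() hands out a page-sized scratch buffer, ring_buffer_read_page() swaps or copies a page of events into it, and ring_buffer_free_read_page() gives the page back. A hedged sketch; the len and "full" parameters of ring_buffer_read_page() are truncated in this listing, so their meaning here is an assumption:

static int read_one_page(struct ring_buffer *buffer, int cpu)
{
        void *page;
        int ret;

        page = ring_buffer_alloc_read_page(buffer, cpu);
        if (IS_ERR_OR_NULL(page))
                return -ENOMEM;

        /* Non-zero last argument is assumed to mean "only succeed when a
         * whole page of data is ready"; 0 takes whatever is there. */
        ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);

        ring_buffer_free_read_page(buffer, cpu, page);
        return ret < 0 ? ret : 0;
}
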
4871 struct ring_buffer *buffer; in trace_rb_cpu_prepare() local
4876 buffer = container_of(node, struct ring_buffer, node); in trace_rb_cpu_prepare()
4877 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
4883 for_each_buffer_cpu(buffer, cpu_i) { in trace_rb_cpu_prepare()
4886 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
4887 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
4895 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
4896 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
4897 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
4903 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
4926 struct ring_buffer *buffer; member
4982 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
4996 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
5022 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
5068 struct ring_buffer *buffer; in test_ringbuffer() local
5079 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
5080 if (WARN_ON(!buffer)) in test_ringbuffer()
5084 ring_buffer_record_off(buffer); in test_ringbuffer()
5087 rb_data[cpu].buffer = buffer; in test_ringbuffer()
5110 ring_buffer_record_on(buffer); in test_ringbuffer()
5136 ring_buffer_free(buffer); in test_ringbuffer()
5176 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
5181 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
5190 RB_WARN_ON(buffer, 1); in test_ringbuffer()
5210 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
5214 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
5222 ring_buffer_free(buffer); in test_ringbuffer()