Lines Matching refs:buffer

203 #define for_each_buffer_cpu(buffer, cpu)		\  argument
204 for_each_cpu(cpu, buffer->cpumask)
256 struct ring_buffer *buffer; member
292 #define RB_WARN_ON(buffer, cond) \ argument
296 atomic_inc(&buffer->record_disabled); \
369 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) in rb_allocate_cpu_buffer() argument
382 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
401 ret = rb_allocate_pages(cpu_buffer, buffer->pages); in rb_allocate_cpu_buffer()
452 struct ring_buffer *buffer; in ring_buffer_alloc() local
462 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in ring_buffer_alloc()
464 if (!buffer) in ring_buffer_alloc()
467 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in ring_buffer_alloc()
470 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); in ring_buffer_alloc()
471 buffer->flags = flags; in ring_buffer_alloc()
474 if (buffer->pages == 1) in ring_buffer_alloc()
475 buffer->pages++; in ring_buffer_alloc()
477 cpumask_copy(buffer->cpumask, cpu_possible_mask); in ring_buffer_alloc()
478 buffer->cpus = nr_cpu_ids; in ring_buffer_alloc()
481 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in ring_buffer_alloc()
483 if (!buffer->buffers) in ring_buffer_alloc()
486 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_alloc()
487 buffer->buffers[cpu] = in ring_buffer_alloc()
488 rb_allocate_cpu_buffer(buffer, cpu); in ring_buffer_alloc()
489 if (!buffer->buffers[cpu]) in ring_buffer_alloc()
493 mutex_init(&buffer->mutex); in ring_buffer_alloc()
495 return buffer; in ring_buffer_alloc()
498 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_alloc()
499 if (buffer->buffers[cpu]) in ring_buffer_alloc()
500 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_alloc()
502 kfree(buffer->buffers); in ring_buffer_alloc()
505 free_cpumask_var(buffer->cpumask); in ring_buffer_alloc()
508 kfree(buffer); in ring_buffer_alloc()
518 ring_buffer_free(struct ring_buffer *buffer) in ring_buffer_free() argument
522 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
523 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
525 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
527 kfree(buffer); in ring_buffer_free()
602 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) in ring_buffer_resize() argument
615 if (!buffer) in ring_buffer_resize()
620 buffer_size = buffer->pages * BUF_PAGE_SIZE; in ring_buffer_resize()
629 mutex_lock(&buffer->mutex); in ring_buffer_resize()
636 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { in ring_buffer_resize()
637 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
641 rm_pages = buffer->pages - nr_pages; in ring_buffer_resize()
643 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
644 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
658 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { in ring_buffer_resize()
659 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
663 new_pages = nr_pages - buffer->pages; in ring_buffer_resize()
665 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
681 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
682 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
686 if (RB_WARN_ON(buffer, !list_empty(&pages))) { in ring_buffer_resize()
687 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
692 buffer->pages = nr_pages; in ring_buffer_resize()
693 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
702 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
990 struct ring_buffer *buffer = cpu_buffer->buffer; in __rb_reserve_next() local
1028 if (!(buffer->flags & RB_FL_OVERWRITE)) in __rb_reserve_next()
1281 ring_buffer_lock_reserve(struct ring_buffer *buffer, in ring_buffer_lock_reserve() argument
1292 if (atomic_read(&buffer->record_disabled)) in ring_buffer_lock_reserve()
1300 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_lock_reserve()
1303 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
1356 int ring_buffer_unlock_commit(struct ring_buffer *buffer, in ring_buffer_unlock_commit() argument
1363 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
1392 int ring_buffer_write(struct ring_buffer *buffer, in ring_buffer_write() argument
1406 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
1413 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
1416 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
1462 void ring_buffer_record_disable(struct ring_buffer *buffer) in ring_buffer_record_disable() argument
1464 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
1475 void ring_buffer_record_enable(struct ring_buffer *buffer) in ring_buffer_record_enable() argument
1477 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
1491 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
1495 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
1498 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
1511 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
1515 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
1518 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
1528 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
1532 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
1535 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
1545 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
1549 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
1552 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
1564 unsigned long ring_buffer_entries(struct ring_buffer *buffer) in ring_buffer_entries() argument
1571 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
1572 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
1587 unsigned long ring_buffer_overruns(struct ring_buffer *buffer) in ring_buffer_overruns() argument
1594 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
1595 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
1817 struct ring_buffer *buffer; in rb_advance_iter() local
1823 buffer = cpu_buffer->buffer; in rb_advance_iter()
1829 if (RB_WARN_ON(buffer, in rb_advance_iter()
1860 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) in rb_buffer_peek() argument
1867 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in rb_buffer_peek()
1870 cpu_buffer = buffer->buffers[cpu]; in rb_buffer_peek()
1924 struct ring_buffer *buffer; in rb_iter_peek() local
1933 buffer = cpu_buffer->buffer; in rb_iter_peek()
1992 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) in ring_buffer_peek() argument
1994 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
1999 event = rb_buffer_peek(buffer, cpu, ts); in ring_buffer_peek()
2036 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) in ring_buffer_consume() argument
2038 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
2042 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
2047 event = rb_buffer_peek(buffer, cpu, ts); in ring_buffer_consume()
2073 ring_buffer_read_start(struct ring_buffer *buffer, int cpu) in ring_buffer_read_start() argument
2079 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_start()
2086 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_start()
2151 unsigned long ring_buffer_size(struct ring_buffer *buffer) in ring_buffer_size() argument
2153 return BUF_PAGE_SIZE * buffer->pages; in ring_buffer_size()
2187 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
2189 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
2192 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
2211 void ring_buffer_reset(struct ring_buffer *buffer) in ring_buffer_reset() argument
2215 for_each_buffer_cpu(buffer, cpu) in ring_buffer_reset()
2216 ring_buffer_reset_cpu(buffer, cpu); in ring_buffer_reset()
2224 int ring_buffer_empty(struct ring_buffer *buffer) in ring_buffer_empty() argument
2230 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
2231 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
2244 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
2248 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
2251 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
2295 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
2296 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
2341 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) in ring_buffer_alloc_read_page() argument
2362 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) in ring_buffer_free_read_page() argument
2399 int ring_buffer_read_page(struct ring_buffer *buffer, in ring_buffer_read_page() argument
2402 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
2421 event = rb_buffer_peek(buffer, cpu, NULL); in ring_buffer_read_page()